From: William Tu <witu@nvidia.com>
To: <netdev@vger.kernel.org>
Cc: <jiri@nvidia.com>, <bodong@nvidia.com>, <tariqt@nvidia.com>,
<yossiku@nvidia.com>, <kuba@kernel.org>, <witu@nvidia.com>
Subject: [PATCH RFC v3 net-next 2/2] net/mlx5e: Add eswitch shared descriptor devlink
Date: Thu, 7 Mar 2024 01:12:53 +0200 [thread overview]
Message-ID: <20240306231253.8100-2-witu@nvidia.com> (raw)
In-Reply-To: <20240306231253.8100-1-witu@nvidia.com>
Add devlink spool_size attribute support for the eswitch shared memory
pool. This is used to configure the shared memory pool for the eswitch.
Signed-off-by: William Tu <witu@nvidia.com>
---
.../net/ethernet/mellanox/mlx5/core/devlink.c | 2 +
.../net/ethernet/mellanox/mlx5/core/eswitch.h | 5 ++
.../mellanox/mlx5/core/eswitch_offloads.c | 49 +++++++++++++++++++
3 files changed, 56 insertions(+)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3e064234f6fe..cc0c50691ecd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -312,6 +312,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
.eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
+ .eswitch_spool_size_set = mlx5_devlink_eswitch_spool_size_set,
+ .eswitch_spool_size_get = mlx5_devlink_eswitch_spool_size_get,
.rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set,
.rate_leaf_tx_max_set = mlx5_esw_devlink_rate_leaf_tx_max_set,
.rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 349e28a6dd8d..2e2e3b5c3b3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -378,6 +378,8 @@ struct mlx5_eswitch {
struct mlx5_esw_functions esw_funcs;
struct {
u32 large_group_num;
+ u32 shared_rx_ring_counts;
+ bool enable_shared_rx_ring;
} params;
struct blocking_notifier_head n_head;
struct xarray paired;
@@ -549,6 +551,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
enum devlink_eswitch_encap_mode *encap);
+int mlx5_devlink_eswitch_spool_size_set(struct devlink *devlink, u32 size,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_eswitch_spool_size_get(struct devlink *devlink, u32 *size);
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
u8 *hw_addr, int *hw_addr_len,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b0455134c98e..e27d9fba8840 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -4019,6 +4019,55 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
return 0;
}
+int mlx5_devlink_eswitch_spool_size_set(struct devlink *devlink,
+ u32 spool_size,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ bool enable;
+ int err = 0;
+ int counts;
+
+ esw = mlx5_devlink_eswitch_get(devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ down_write(&esw->mode_lock);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't enable shared pool in switchdev mode");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ counts = spool_size >> PAGE_SHIFT;
+ enable = !(counts == 0);
+ esw->params.enable_shared_rx_ring = enable;
+ esw->params.shared_rx_ring_counts = enable ? counts : 0;
+
+out:
+ up_write(&esw->mode_lock);
+ return err;
+}
+
+int mlx5_devlink_eswitch_spool_size_get(struct devlink *devlink,
+ u32 *spool_size)
+{
+ struct mlx5_eswitch *esw;
+ bool enable;
+
+ esw = mlx5_devlink_eswitch_get(devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ enable = esw->params.enable_shared_rx_ring;
+ if (enable)
+ *spool_size = esw->params.shared_rx_ring_counts << PAGE_SHIFT;
+ else
+ *spool_size = 0;
+
+ return 0;
+}
+
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
--
2.38.1
next prev parent reply other threads:[~2024-03-06 23:13 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-03-06 23:12 [PATCH RFC v3 net-next 1/2] devlink: Add shared memory pool eswitch attribute William Tu
2024-03-06 23:12 ` William Tu [this message]
2024-03-07 8:35 ` Jiri Pirko
2024-03-08 0:16 ` William Tu
2024-03-09 4:44 ` Jakub Kicinski
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240306231253.8100-2-witu@nvidia.com \
--to=witu@nvidia.com \
--cc=bodong@nvidia.com \
--cc=jiri@nvidia.com \
--cc=kuba@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=tariqt@nvidia.com \
--cc=yossiku@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).