From: Parav Pandit <parav@nvidia.com>
To: <netdev@vger.kernel.org>, <davem@davemloft.net>,
<edumazet@google.com>, <kuba@kernel.org>, <pabeni@redhat.com>,
<corbet@lwn.net>
Cc: <saeedm@nvidia.com>, <leon@kernel.org>, <jiri@resnulli.us>,
<shayd@nvidia.com>, <danielj@nvidia.com>, <dchumak@nvidia.com>,
<linux-doc@vger.kernel.org>, <linux-rdma@vger.kernel.org>,
Parav Pandit <parav@nvidia.com>, Jiri Pirko <jiri@nvidia.com>
Subject: [net-next 2/2] mlx5/core: Support max_io_eqs for a function
Date: Mon, 1 Apr 2024 12:05:31 +0300
Message-ID: <20240401090531.574575-3-parav@nvidia.com>
In-Reply-To: <20240401090531.574575-1-parav@nvidia.com>
Implement the get and set devlink port callbacks for the maximum
number of IO event queues of SFs and VFs.
This enables the administrator on the hypervisor to control the
maximum number of IO event queues, which are typically used to derive
the maximum and default number of net device channels or RDMA device
completion vectors.
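For example, with this series applied, the hypervisor administrator
can cap a VF to 32 IO event queues (illustrative command, assuming an
iproute2 devlink binary that supports the max_io_eqs attribute added
in patch 1/2; the PCI address and port index are placeholders):

  $ devlink port function set pci/0000:06:00.0/2 max_io_eqs 32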
Signed-off-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
.../mellanox/mlx5/core/esw/devlink_port.c | 2 +
.../net/ethernet/mellanox/mlx5/core/eswitch.h | 8 ++
.../mellanox/mlx5/core/eswitch_offloads.c | 103 +++++++++++++++++++
3 files changed, 113 insertions(+)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index d8e739cbcbce..76d1ed93c773 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -98,6 +98,8 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
.port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
.port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
#endif /* CONFIG_XFRM_OFFLOAD */
+ .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get,
+ .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set,
};
static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 349e28a6dd8d..50ce1ea20dd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -573,6 +573,14 @@ int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_en
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
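+/* Get/set the maximum number of IO EQs of a devlink port function. */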
+int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
+ u32 *max_io_eqs,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
+ u32 max_io_eqs,
+ struct netlink_ext_ack *extack);
+
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index baaae628b0a0..9d9a06a25cac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -66,6 +66,8 @@
#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+#define MLX5_ESW_MAX_CTRL_EQS 4 /* EQs reserved for the control path */
+
static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
@@ -4557,3 +4559,104 @@ int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
return err;
}
#endif /* CONFIG_XFRM_OFFLOAD */
+
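+/* Report the maximum number of IO EQs for the function: the device's
+ * max_num_eqs capability minus the EQs reserved for the control path.
+ */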
+int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ u16 vport_num = vport->vport;
+ void *query_ctx;
+ void *hca_caps;
+ u32 max_eqs;
+ int err;
+
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ mutex_lock(&esw->state_lock);
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
+ MLX5_CAP_GENERAL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
+ goto out;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ max_eqs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_eqs);
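+ /* A device with fewer EQs than the control reservation has no IO EQs */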
+ if (max_eqs < MLX5_ESW_MAX_CTRL_EQS)
+ *max_io_eqs = 0;
+ else
+ *max_io_eqs = max_eqs - MLX5_ESW_MAX_CTRL_EQS;
+out:
+ mutex_unlock(&esw->state_lock);
+ kfree(query_ctx);
+ return err;
+}
+
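+/* Set the maximum number of IO EQs for the function; the control EQ
+ * reservation is added back before programming the device capability.
+ */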
+int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ u16 vport_num = vport->vport;
+ u16 max_eqs = max_io_eqs + MLX5_ESW_MAX_CTRL_EQS;
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
+ }
+
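+ /* The programmed value must fit the 16-bit max_num_eqs capability */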
+ if (max_io_eqs > USHRT_MAX - MLX5_ESW_MAX_CTRL_EQS) {
+ NL_SET_ERR_MSG_MOD(extack, "Supplied value out of range");
+ return -EINVAL;
+ }
+
+ mutex_lock(&esw->state_lock);
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
+ MLX5_CAP_GENERAL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
+ goto out_free;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ MLX5_SET(cmd_hca_cap, hca_caps, max_num_eqs, max_eqs);
+
+ err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
+
+out_free:
+ kfree(query_ctx);
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
--
2.26.2