From: Tariq Toukan <tariqt@nvidia.com>
To: Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Andrew Lunn <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>
Cc: Donald Hunter <donald.hunter@gmail.com>,
Jiri Pirko <jiri@resnulli.us>, Jonathan Corbet <corbet@lwn.net>,
Saeed Mahameed <saeedm@nvidia.com>,
"Leon Romanovsky" <leon@kernel.org>,
Tariq Toukan <tariqt@nvidia.com>, Mark Bloch <mbloch@nvidia.com>,
<netdev@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
<linux-doc@vger.kernel.org>, <linux-rdma@vger.kernel.org>,
Gal Pressman <gal@nvidia.com>, Moshe Shemesh <moshe@nvidia.com>,
Carolina Jubran <cjubran@nvidia.com>,
Cosmin Ratiu <cratiu@nvidia.com>, Jiri Pirko <jiri@nvidia.com>,
Randy Dunlap <rdunlap@infradead.org>,
Simon Horman <horms@kernel.org>,
Krzysztof Kozlowski <krzk@kernel.org>
Subject: [PATCH net-next V5 13/15] net/mlx5: qos: Support cross-device tx scheduling
Date: Tue, 20 Jan 2026 09:57:56 +0200 [thread overview]
Message-ID: <1768895878-1637182-14-git-send-email-tariqt@nvidia.com> (raw)
In-Reply-To: <1768895878-1637182-1-git-send-email-tariqt@nvidia.com>
From: Cosmin Ratiu <cratiu@nvidia.com>
Up to now, rate groups could only contain vports from the same E-Switch.
This patch relaxes that restriction if the device supports it
(HCA_CAP.esw_cross_esw_sched == true) and the right conditions are met:
- Link Aggregation (LAG) is enabled.
- The E-Switches are from the same shared devlink device.
This patch does not yet enable cross-esw scheduling; it is just the last
preparatory patch.
Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Reviewed-by: Carolina Jubran <cjubran@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../net/ethernet/mellanox/mlx5/core/esw/qos.c | 122 ++++++++++++------
1 file changed, 86 insertions(+), 36 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 0d187399d846..b4abb6fa2168 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -50,7 +50,9 @@ struct mlx5_esw_sched_node {
enum sched_node_type type;
/* The eswitch this node belongs to. */
struct mlx5_eswitch *esw;
- /* The children nodes of this node, empty list for leaf nodes. */
+ /* The children nodes of this node, empty list for leaf nodes.
+ * Can be from multiple E-Switches.
+ */
struct list_head children;
/* Valid only if this node is associated with a vport. */
struct mlx5_vport *vport;
@@ -419,6 +421,7 @@ esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
struct mlx5_esw_sched_node *parent = vport_node->parent;
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_core_dev *dev = vport_node->esw->dev;
+ struct mlx5_vport *vport = vport_node->vport;
void *attr;
if (!mlx5_qos_element_type_supported(
@@ -430,11 +433,18 @@ esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
- MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport);
+ MLX5_SET(vport_element, attr, vport_number, vport->vport);
MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
parent ? parent->ix : vport_node->esw->qos.root_tsar_ix);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
vport_node->max_rate);
+ if (vport->dev != dev) {
+ /* The port is assigned to a node on another eswitch. */
+ MLX5_SET(vport_element, attr, eswitch_owner_vhca_id_valid,
+ true);
+ MLX5_SET(vport_element, attr, eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(vport->dev, vhca_id));
+ }
return esw_qos_node_create_sched_element(vport_node, sched_ctx, extack);
}
@@ -446,6 +456,7 @@ esw_qos_vport_tc_create_sched_element(struct mlx5_esw_sched_node *vport_tc_node,
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_core_dev *dev = vport_tc_node->esw->dev;
+ struct mlx5_vport *vport = vport_tc_node->vport;
void *attr;
if (!mlx5_qos_element_type_supported(
@@ -457,8 +468,7 @@ esw_qos_vport_tc_create_sched_element(struct mlx5_esw_sched_node *vport_tc_node,
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC);
attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
- MLX5_SET(vport_tc_element, attr, vport_number,
- vport_tc_node->vport->vport);
+ MLX5_SET(vport_tc_element, attr, vport_number, vport->vport);
MLX5_SET(vport_tc_element, attr, traffic_class, vport_tc_node->tc);
MLX5_SET(scheduling_context, sched_ctx, max_bw_obj_id,
rate_limit_elem_ix);
@@ -466,6 +476,13 @@ esw_qos_vport_tc_create_sched_element(struct mlx5_esw_sched_node *vport_tc_node,
vport_tc_node->parent->ix);
MLX5_SET(scheduling_context, sched_ctx, bw_share,
vport_tc_node->bw_share);
+ if (vport->dev != dev) {
+ /* The port is assigned to a node on another eswitch. */
+ MLX5_SET(vport_tc_element, attr, eswitch_owner_vhca_id_valid,
+ true);
+ MLX5_SET(vport_tc_element, attr, eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(vport->dev, vhca_id));
+ }
return esw_qos_node_create_sched_element(vport_tc_node, sched_ctx,
extack);
@@ -1194,6 +1211,29 @@ static int esw_qos_vport_tc_check_type(enum sched_node_type curr_type,
return 0;
}
+static bool esw_qos_validate_unsupported_tc_bw(struct mlx5_eswitch *esw,
+ u32 *tc_bw)
+{
+ int i, num_tcs = esw_qos_num_tcs(esw->dev);
+
+ for (i = num_tcs; i < DEVLINK_RATE_TCS_MAX; i++)
+ if (tc_bw[i])
+ return false;
+
+ return true;
+}
+
+static bool esw_qos_vport_validate_unsupported_tc_bw(struct mlx5_vport *vport,
+ u32 *tc_bw)
+{
+ struct mlx5_esw_sched_node *node = vport->qos.sched_node;
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ esw = (node && node->parent) ? node->parent->esw : esw;
+
+ return esw_qos_validate_unsupported_tc_bw(esw, tc_bw);
+}
+
static int esw_qos_vport_update(struct mlx5_vport *vport,
enum sched_node_type type,
struct mlx5_esw_sched_node *parent,
@@ -1213,8 +1253,17 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
if (err)
return err;
- if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type)
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
+ struct mlx5_eswitch *esw = parent ?
+ parent->esw : vport->dev->priv.eswitch;
+
esw_qos_tc_arbiter_get_bw_shares(vport_node, curr_tc_bw);
+ if (!esw_qos_validate_unsupported_tc_bw(esw, curr_tc_bw)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported traffic classes on the new device");
+ return -EOPNOTSUPP;
+ }
+ }
esw_qos_vport_disable(vport, extack);
@@ -1224,10 +1273,9 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
extack = NULL;
}
- if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type)
esw_qos_set_tc_arbiter_bw_shares(vport_node, curr_tc_bw,
extack);
- }
return err;
}
@@ -1575,30 +1623,6 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return 0;
}
-static bool esw_qos_validate_unsupported_tc_bw(struct mlx5_eswitch *esw,
- u32 *tc_bw)
-{
- int i, num_tcs = esw_qos_num_tcs(esw->dev);
-
- for (i = num_tcs; i < DEVLINK_RATE_TCS_MAX; i++) {
- if (tc_bw[i])
- return false;
- }
-
- return true;
-}
-
-static bool esw_qos_vport_validate_unsupported_tc_bw(struct mlx5_vport *vport,
- u32 *tc_bw)
-{
- struct mlx5_esw_sched_node *node = vport->qos.sched_node;
- struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
-
- esw = (node && node->parent) ? node->parent->esw : esw;
-
- return esw_qos_validate_unsupported_tc_bw(esw, tc_bw);
-}
-
static bool esw_qos_tc_bw_disabled(u32 *tc_bw)
{
int i;
@@ -1803,18 +1827,44 @@ int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
return 0;
}
+static int
+mlx5_esw_validate_cross_esw_scheduling(struct mlx5_eswitch *esw,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ if (!parent || esw == parent->esw)
+ return 0;
+
+ if (!MLX5_CAP_QOS(esw->dev, esw_cross_esw_sched)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cross E-Switch scheduling is not supported");
+ return -EOPNOTSUPP;
+ }
+ if (esw->dev->shd != parent->esw->dev->shd) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot add vport to a parent belonging to a different device");
+ return -EOPNOTSUPP;
+ }
+ if (!mlx5_lag_is_active(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cross E-Switch scheduling requires LAG to be activated");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int
mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport,
struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
- int err = 0;
+ int err;
- if (parent && parent->esw != esw) {
- NL_SET_ERR_MSG_MOD(extack, "Cross E-Switch scheduling is not supported");
- return -EOPNOTSUPP;
- }
+ err = mlx5_esw_validate_cross_esw_scheduling(esw, parent, extack);
+ if (err)
+ return err;
if (!vport->qos.sched_node && parent) {
enum sched_node_type type;
--
2.44.0
next prev parent reply other threads:[~2026-01-20 7:59 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-20 7:57 [PATCH net-next V5 00/15] devlink and mlx5: Support cross-function rate scheduling Tariq Toukan
2026-01-20 7:57 ` [PATCH net-next V5 01/15] documentation: networking: add shared devlink documentation Tariq Toukan
2026-01-20 7:57 ` [PATCH net-next V5 02/15] devlink: introduce shared devlink instance for PFs on same chip Tariq Toukan
2026-01-20 7:57 ` [PATCH net-next V5 03/15] devlink: Reverse locking order for nested instances Tariq Toukan
2026-01-20 7:57 ` [PATCH net-next V5 04/15] devlink: Add helpers to lock nested-in instances Tariq Toukan
2026-01-20 7:57 ` [PATCH net-next V5 05/15] devlink: Refactor devlink_rate_nodes_check Tariq Toukan
2026-01-20 7:57 ` [PATCH net-next V5 06/15] devlink: Decouple rate storage from associated devlink object Tariq Toukan
2026-01-22 3:39 ` [net-next,V5,06/15] " Jakub Kicinski
2026-01-22 11:18 ` Cosmin Ratiu
2026-01-20 7:57 ` [PATCH net-next V5 07/15] devlink: Add parent dev to devlink API Tariq Toukan
2026-01-20 7:57 ` [PATCH net-next V5 08/15] devlink: Allow parent dev for rate-set and rate-new Tariq Toukan
2026-01-20 7:57 ` [PATCH net-next V5 09/15] devlink: Allow rate node parents from other devlinks Tariq Toukan
2026-01-20 7:57 ` [PATCH net-next V5 10/15] net/mlx5: Add a shared devlink instance for PFs on same chip Tariq Toukan
2026-01-22 3:39 ` [net-next,V5,10/15] " Jakub Kicinski
2026-01-22 3:41 ` Jakub Kicinski
2026-01-22 7:42 ` [PATCH net-next V5 10/15] " Krzysztof Kozlowski
2026-01-22 11:13 ` Cosmin Ratiu
2026-01-20 7:57 ` [PATCH net-next V5 11/15] net/mlx5: Expose a function to clear a vport's parent Tariq Toukan
2026-01-22 3:40 ` [net-next,V5,11/15] " Jakub Kicinski
2026-01-22 3:42 ` Jakub Kicinski
2026-01-20 7:57 ` [PATCH net-next V5 12/15] net/mlx5: Store QoS sched nodes in the sh_devlink Tariq Toukan
2026-01-22 3:40 ` [net-next,V5,12/15] " Jakub Kicinski
2026-01-22 11:15 ` Cosmin Ratiu
2026-01-20 7:57 ` Tariq Toukan [this message]
2026-01-20 7:57 ` [PATCH net-next V5 14/15] net/mlx5: qos: Enable cross-device scheduling Tariq Toukan
2026-01-20 7:57 ` [PATCH net-next V5 15/15] net/mlx5: Document devlink rates Tariq Toukan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1768895878-1637182-14-git-send-email-tariqt@nvidia.com \
--to=tariqt@nvidia.com \
--cc=andrew+netdev@lunn.ch \
--cc=cjubran@nvidia.com \
--cc=corbet@lwn.net \
--cc=cratiu@nvidia.com \
--cc=davem@davemloft.net \
--cc=donald.hunter@gmail.com \
--cc=edumazet@google.com \
--cc=gal@nvidia.com \
--cc=horms@kernel.org \
--cc=jiri@nvidia.com \
--cc=jiri@resnulli.us \
--cc=krzk@kernel.org \
--cc=kuba@kernel.org \
--cc=leon@kernel.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=mbloch@nvidia.com \
--cc=moshe@nvidia.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=rdunlap@infradead.org \
--cc=saeedm@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox