From: Tariq Toukan <tariqt@nvidia.com>
To: Leon Romanovsky <leon@kernel.org>, Jason Gunthorpe <jgg@ziepe.ca>,
"Saeed Mahameed" <saeedm@nvidia.com>,
Tariq Toukan <tariqt@nvidia.com>
Cc: Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Andrew Lunn <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
Mark Bloch <mbloch@nvidia.com>, <linux-kernel@vger.kernel.org>,
<linux-rdma@vger.kernel.org>, <netdev@vger.kernel.org>,
Gal Pressman <gal@nvidia.com>,
Dragos Tatulea <dtatulea@nvidia.com>,
Moshe Shemesh <moshe@nvidia.com>, Shay Drory <shayd@nvidia.com>,
Alexei Lazar <alazar@nvidia.com>
Subject: [PATCH mlx5-next V2 6/9] net/mlx5: LAG, replace mlx5_get_dev_index with LAG sequence number
Date: Mon, 9 Mar 2026 11:34:32 +0200 [thread overview]
Message-ID: <20260309093435.1850724-7-tariqt@nvidia.com> (raw)
In-Reply-To: <20260309093435.1850724-1-tariqt@nvidia.com>
From: Shay Drory <shayd@nvidia.com>
Introduce mlx5_lag_get_dev_seq() which returns a device's sequence
number within the LAG: the master is always 0, and the remaining devices
are numbered sequentially starting from 1. This provides a stable index
for peer flow tracking and vport ordering without depending on
native_port_num.
Replace mlx5_get_dev_index() usage in en_tc.c (peer flow array
indexing) and ib_rep.c (vport index ordering) with the new API.
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
drivers/infiniband/hw/mlx5/ib_rep.c | 4 ++-
.../net/ethernet/mellanox/mlx5/core/en_tc.c | 9 ++---
.../net/ethernet/mellanox/mlx5/core/lag/lag.c | 34 +++++++++++++++++++
include/linux/mlx5/lag.h | 11 ++++++
4 files changed, 53 insertions(+), 5 deletions(-)
create mode 100644 include/linux/mlx5/lag.h
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 621834d75205..df8f049c5806 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*/
+#include <linux/mlx5/lag.h>
#include <linux/mlx5/vport.h>
#include "ib_rep.h"
#include "srq.h"
@@ -134,7 +135,8 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
/* Only 1 ib port is the representor for all uplinks */
peer_n_ports--;
- if (mlx5_get_dev_index(peer_dev) < mlx5_get_dev_index(dev))
+ if (mlx5_lag_get_dev_seq(peer_dev) <
+ mlx5_lag_get_dev_seq(dev))
vport_index += peer_n_ports;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 1434b65d4746..397a93584fd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -35,6 +35,7 @@
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/lag.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
@@ -2131,7 +2132,7 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
mutex_unlock(&esw->offloads.peer_mutex);
list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
- if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
+ if (peer_index != mlx5_lag_get_dev_seq(peer_flow->priv->mdev))
continue;
list_del(&peer_flow->peer_flows);
@@ -2154,7 +2155,7 @@ static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow)
devcom = flow->priv->mdev->priv.eswitch->devcom;
mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
- i = mlx5_get_dev_index(peer_esw->dev);
+ i = mlx5_lag_get_dev_seq(peer_esw->dev);
mlx5e_tc_del_fdb_peer_flow(flow, i);
}
}
@@ -4584,7 +4585,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- int i = mlx5_get_dev_index(peer_esw->dev);
+ int i = mlx5_lag_get_dev_seq(peer_esw->dev);
struct mlx5e_rep_priv *peer_urpriv;
struct mlx5e_tc_flow *peer_flow;
struct mlx5_core_dev *in_mdev;
@@ -5525,7 +5526,7 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
devcom = esw->devcom;
mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
- i = mlx5_get_dev_index(peer_esw->dev);
+ i = mlx5_lag_get_dev_seq(peer_esw->dev);
list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i])
mlx5e_tc_del_fdb_peers_flow(flow);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 4beee64c937a..51ec8f61ecbb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
+#include <linux/mlx5/lag.h>
#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "mlx5_core.h"
@@ -369,6 +370,39 @@ int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq)
return -ENOENT;
}
+/* Reverse of mlx5_lag_get_dev_index_by_seq: given a device, return its
+ * sequence number in the LAG. Master is always 0, others numbered
+ * sequentially starting from 1.
+ */
+int mlx5_lag_get_dev_seq(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev = mlx5_lag_dev(dev);
+ int master_idx, i, num = 1;
+ struct lag_func *pf;
+
+ if (!ldev)
+ return -ENOENT;
+
+ master_idx = mlx5_lag_get_master_idx(ldev);
+ if (master_idx < 0)
+ return -ENOENT;
+
+ pf = mlx5_lag_pf(ldev, master_idx);
+ if (pf && pf->dev == dev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (i == master_idx)
+ continue;
+ pf = mlx5_lag_pf(ldev, i);
+ if (pf->dev == dev)
+ return num;
+ num++;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL(mlx5_lag_get_dev_seq);
+
/* Devcom events for LAG master marking */
#define LAG_DEVCOM_PAIR (0)
#define LAG_DEVCOM_UNPAIR (1)
diff --git a/include/linux/mlx5/lag.h b/include/linux/mlx5/lag.h
new file mode 100644
index 000000000000..d370dfd19055
--- /dev/null
+++ b/include/linux/mlx5/lag.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LAG_API_H__
+#define __MLX5_LAG_API_H__
+
+struct mlx5_core_dev;
+
+int mlx5_lag_get_dev_seq(struct mlx5_core_dev *dev);
+
+#endif /* __MLX5_LAG_API_H__ */
--
2.44.0
next prev parent reply other threads:[~2026-03-09 9:35 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-09 9:34 [PATCH mlx5-next V2 0/9] mlx5-next updates 2026-03-09 Tariq Toukan
2026-03-09 9:34 ` [PATCH mlx5-next V2 1/9] net/mlx5: Add IFC bits for shared headroom pool PBMC support Tariq Toukan
2026-03-09 9:34 ` [PATCH mlx5-next V2 2/9] net/mlx5: Add silent mode set/query and VHCA RX IFC bits Tariq Toukan
2026-03-09 9:34 ` [PATCH mlx5-next V2 3/9] net/mlx5: LAG, replace pf array with xarray Tariq Toukan
2026-03-09 9:34 ` [PATCH mlx5-next V2 4/9] net/mlx5: LAG, use xa_alloc to manage LAG device indices Tariq Toukan
2026-03-09 9:34 ` [PATCH mlx5-next V2 5/9] net/mlx5: E-switch, modify peer miss rule index to vhca_id Tariq Toukan
2026-03-09 9:34 ` Tariq Toukan [this message]
2026-03-09 9:34 ` [PATCH mlx5-next V2 7/9] net/mlx5: Add VHCA RX flow destination support for FW steering Tariq Toukan
2026-03-09 9:34 ` [PATCH mlx5-next V2 8/9] {net/RDMA}/mlx5: Add LAG demux table API and vport demux rules Tariq Toukan
2026-03-09 9:34 ` [PATCH mlx5-next V2 9/9] net/mlx5: Expose MLX5_UMR_ALIGN definition Tariq Toukan
2026-03-14 18:08 ` [PATCH mlx5-next V2 0/9] mlx5-next updates 2026-03-09 Tariq Toukan
2026-03-16 20:23 ` Leon Romanovsky
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260309093435.1850724-7-tariqt@nvidia.com \
--to=tariqt@nvidia.com \
--cc=alazar@nvidia.com \
--cc=andrew+netdev@lunn.ch \
--cc=davem@davemloft.net \
--cc=dtatulea@nvidia.com \
--cc=edumazet@google.com \
--cc=gal@nvidia.com \
--cc=jgg@ziepe.ca \
--cc=kuba@kernel.org \
--cc=leon@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=mbloch@nvidia.com \
--cc=moshe@nvidia.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=saeedm@nvidia.com \
--cc=shayd@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox