* [PATCH net-next V3 1/7] net/mlx5: Lag: refactor representor reload handling
2026-05-03 20:27 [PATCH net-next V3 0/7] net/mlx5: Improve representor lifecycle and late IB representor loading Tariq Toukan
@ 2026-05-03 20:27 ` Tariq Toukan
2026-05-03 20:27 ` [PATCH net-next V3 2/7] net/mlx5: E-Switch, let esw work callers choose GFP flags Tariq Toukan
` (5 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Tariq Toukan @ 2026-05-03 20:27 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Leon Romanovsky, Jason Gunthorpe, Saeed Mahameed, Tariq Toukan,
Mark Bloch, Shay Drory, Or Har-Toov, Edward Srouji, Simon Horman,
Maher Sanalla, Parav Pandit, Patrisious Haddad, Kees Cook,
Gerd Bayer, Moshe Shemesh, Carolina Jubran, Cosmin Ratiu,
linux-rdma, linux-kernel, netdev, Gal Pressman, Dragos Tatulea
From: Mark Bloch <mbloch@nvidia.com>
Representor reload during LAG/MPESW transitions is repeated in several
flows, and the open-coded loops easily fall out of sync when new flags
are added or error handling is tweaked. Move the sequencing into a
single helper so that all call sites share the same ordering and
checks.
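For illustration, the resulting call patterns (names taken from the diff
below) collapse to:

    /* best-effort teardown: skip PFs with all aux devs disabled */
    mlx5_lag_reload_ib_reps(ldev, MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV, true);

    /* setup: reload every PF and abort on the first failure */
    err = mlx5_lag_reload_ib_reps(ldev, 0, false);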
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../net/ethernet/mellanox/mlx5/core/lag/lag.c | 45 +++++++++++--------
.../net/ethernet/mellanox/mlx5/core/lag/lag.h | 2 +
.../ethernet/mellanox/mlx5/core/lag/mpesw.c | 12 ++---
3 files changed, 33 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 449e4bd86c06..a474f970e056 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1093,6 +1093,27 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
}
}
+int mlx5_lag_reload_ib_reps(struct mlx5_lag *ldev, u32 flags, bool cont_on_fail)
+{
+ struct lag_func *pf;
+ int ret;
+ int i;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ pf = mlx5_lag_pf(ldev, i);
+ if (!(pf->dev->priv.flags & flags)) {
+ struct mlx5_eswitch *esw;
+
+ esw = pf->dev->priv.eswitch;
+ ret = mlx5_eswitch_reload_ib_reps(esw);
+ if (ret && !cont_on_fail)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
void mlx5_disable_lag(struct mlx5_lag *ldev)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
@@ -1130,9 +1151,8 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_lag_add_devices(ldev);
if (shared_fdb)
- mlx5_ldev_for_each(i, 0, ldev)
- if (!(mlx5_lag_pf(ldev, i)->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
- mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
+ mlx5_lag_reload_ib_reps(ldev, MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV,
+ true);
}
bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev)
@@ -1388,10 +1408,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (err) {
if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
- if (shared_fdb) {
- mlx5_ldev_for_each(i, 0, ldev)
- mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
- }
+ if (shared_fdb)
+ mlx5_lag_reload_ib_reps(ldev, 0, true);
return;
}
@@ -1409,24 +1427,15 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_nic_vport_enable_roce(dev);
}
} else if (shared_fdb) {
- int i;
-
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
-
- mlx5_ldev_for_each(i, 0, ldev) {
- err = mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
- if (err)
- break;
- }
-
+ err = mlx5_lag_reload_ib_reps(ldev, 0, false);
if (err) {
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
- mlx5_ldev_for_each(i, 0, ldev)
- mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
+ mlx5_lag_reload_ib_reps(ldev, 0, true);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 6c911374f409..daca8ebd5256 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -199,4 +199,6 @@ int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx);
int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq);
int mlx5_lag_num_devs(struct mlx5_lag *ldev);
int mlx5_lag_num_netdevs(struct mlx5_lag *ldev);
+int mlx5_lag_reload_ib_reps(struct mlx5_lag *ldev, u32 flags,
+ bool cont_on_fail);
#endif /* __MLX5_LAG_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 5eea12a6887a..edcd06f3be7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -70,7 +70,6 @@ static int mlx5_lag_enable_mpesw(struct mlx5_lag *ldev)
int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev0;
int err;
- int i;
if (ldev->mode == MLX5_LAG_MODE_MPESW)
return 0;
@@ -103,11 +102,9 @@ static int mlx5_lag_enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- mlx5_ldev_for_each(i, 0, ldev) {
- err = mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
- if (err)
- goto err_rescan_drivers;
- }
+ err = mlx5_lag_reload_ib_reps(ldev, 0, false);
+ if (err)
+ goto err_rescan_drivers;
mlx5_lag_set_vports_agg_speed(ldev);
@@ -119,8 +116,7 @@ static int mlx5_lag_enable_mpesw(struct mlx5_lag *ldev)
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- mlx5_ldev_for_each(i, 0, ldev)
- mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
+ mlx5_lag_reload_ib_reps(ldev, 0, true);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
--
2.44.0
* [PATCH net-next V3 2/7] net/mlx5: E-Switch, let esw work callers choose GFP flags
2026-05-03 20:27 [PATCH net-next V3 0/7] net/mlx5: Improve representor lifecycle and late IB representor loading Tariq Toukan
2026-05-03 20:27 ` [PATCH net-next V3 1/7] net/mlx5: Lag: refactor representor reload handling Tariq Toukan
@ 2026-05-03 20:27 ` Tariq Toukan
2026-05-03 20:27 ` [PATCH net-next V3 3/7] net/mlx5: E-Switch, add representor lifecycle lock Tariq Toukan
` (4 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Tariq Toukan @ 2026-05-03 20:27 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Leon Romanovsky, Jason Gunthorpe, Saeed Mahameed, Tariq Toukan,
Mark Bloch, Shay Drory, Or Har-Toov, Edward Srouji, Simon Horman,
Maher Sanalla, Parav Pandit, Patrisious Haddad, Kees Cook,
Gerd Bayer, Moshe Shemesh, Carolina Jubran, Cosmin Ratiu,
linux-rdma, linux-kernel, netdev, Gal Pressman, Dragos Tatulea
From: Mark Bloch <mbloch@nvidia.com>
mlx5_esw_add_work() always allocates the queued work item with
GFP_ATOMIC. That is required for the E-Switch functions-change notifier,
but not every caller of this helper will run from atomic context.
Pass an allocation flag to mlx5_esw_add_work() and keep the notifier
caller using GFP_ATOMIC. This allows sleepable callers to use GFP_KERNEL
instead of unnecessarily relying on atomic reserves.
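For illustration, the two call patterns side by side (the GFP_KERNEL
caller is only introduced later in this series, in patch 7):

    /* functions-change notifier: may run in atomic context */
    ret = mlx5_esw_add_work(esw, esw_vfs_changed_event_handler, GFP_ATOMIC);

    /* sleepable auxiliary-driver context (added in patch 7) */
    ret = mlx5_esw_add_work(esw, mlx5_eswitch_reload_reps, GFP_KERNEL);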
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 69ddf56e2fc9..69134ce2a908 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3736,11 +3736,12 @@ static void esw_wq_handler(struct work_struct *work)
}
static int mlx5_esw_add_work(struct mlx5_eswitch *esw,
- void (*func)(struct mlx5_eswitch *esw))
+ void (*func)(struct mlx5_eswitch *esw),
+ gfp_t gfp)
{
struct mlx5_host_work *host_work;
- host_work = kzalloc_obj(*host_work, GFP_ATOMIC);
+ host_work = kzalloc_obj(*host_work, gfp);
if (!host_work)
return -ENOMEM;
@@ -3764,7 +3765,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb,
esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
- ret = mlx5_esw_add_work(esw, esw_vfs_changed_event_handler);
+ ret = mlx5_esw_add_work(esw, esw_vfs_changed_event_handler,
+ GFP_ATOMIC);
if (ret)
return NOTIFY_DONE;
--
2.44.0
* [PATCH net-next V3 3/7] net/mlx5: E-Switch, add representor lifecycle lock
2026-05-03 20:27 [PATCH net-next V3 0/7] net/mlx5: Improve representor lifecycle and late IB representor loading Tariq Toukan
2026-05-03 20:27 ` [PATCH net-next V3 1/7] net/mlx5: Lag: refactor representor reload handling Tariq Toukan
2026-05-03 20:27 ` [PATCH net-next V3 2/7] net/mlx5: E-Switch, let esw work callers choose GFP flags Tariq Toukan
@ 2026-05-03 20:27 ` Tariq Toukan
2026-05-03 20:27 ` [PATCH net-next V3 4/7] net/mlx5: Lag, avoid LAG and representor lock cycles Tariq Toukan
` (3 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Tariq Toukan @ 2026-05-03 20:27 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Leon Romanovsky, Jason Gunthorpe, Saeed Mahameed, Tariq Toukan,
Mark Bloch, Shay Drory, Or Har-Toov, Edward Srouji, Simon Horman,
Maher Sanalla, Parav Pandit, Patrisious Haddad, Kees Cook,
Gerd Bayer, Moshe Shemesh, Carolina Jubran, Cosmin Ratiu,
linux-rdma, linux-kernel, netdev, Gal Pressman, Dragos Tatulea
From: Mark Bloch <mbloch@nvidia.com>
Add a per-E-Switch mutex for serializing representor lifecycle work and
provide small helpers for taking and dropping it. Initialize and destroy
the mutex with the E-Switch offloads state.
Add the lock and helper API first; follow-up patches will take the lock in
the individual representor lifecycle components. This keeps the functional
changes split by component and leaves this patch with no intended behavior
change, making the series easier to review and keeping it bisectable.
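The intended usage in the follow-up patches is a plain critical section
around any representor lifecycle operation; a minimal sketch:

    mlx5_esw_reps_block(esw);
    /* load/unload/register representors for this E-Switch */
    mlx5_esw_reps_unblock(esw);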
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 6 ++++++
.../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 12 ++++++++++++
2 files changed, 18 insertions(+)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2fd601bd102f..3858690e09b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -316,6 +316,7 @@ struct mlx5_esw_offload {
DECLARE_HASHTABLE(termtbl_tbl, 8);
struct mutex termtbl_mutex; /* protects termtbl hash */
struct xarray vhca_map;
+ struct mutex reps_lock; /* protects representor load/unload/register */
const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
u8 inline_mode;
atomic64_t num_flows;
@@ -951,6 +952,8 @@ mlx5_esw_lag_demux_fg_create(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *
mlx5_esw_lag_demux_rule_create(struct mlx5_eswitch *esw, u16 vport_num,
struct mlx5_flow_table *lag_ft);
+void mlx5_esw_reps_block(struct mlx5_eswitch *esw);
+void mlx5_esw_reps_unblock(struct mlx5_eswitch *esw);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -1028,6 +1031,9 @@ mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
return true;
}
+static inline void mlx5_esw_reps_block(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_reps_unblock(struct mlx5_eswitch *esw) {}
+
static inline bool
mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 69134ce2a908..af7d0d58c048 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2413,6 +2413,16 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
return err;
}
+void mlx5_esw_reps_block(struct mlx5_eswitch *esw)
+{
+ mutex_lock(&esw->offloads.reps_lock);
+}
+
+void mlx5_esw_reps_unblock(struct mlx5_eswitch *esw)
+{
+ mutex_unlock(&esw->offloads.reps_lock);
+}
+
static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode)
{
mlx5_devcom_comp_lock(esw->dev->priv.hca_devcom_comp);
@@ -2645,6 +2655,7 @@ static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
mlx5_esw_for_each_rep(esw, i, rep)
mlx5_esw_offloads_rep_cleanup(esw, rep);
xa_destroy(&esw->offloads.vport_reps);
+ mutex_destroy(&esw->offloads.reps_lock);
}
static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
@@ -2654,6 +2665,7 @@ static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
int err;
xa_init(&esw->offloads.vport_reps);
+ mutex_init(&esw->offloads.reps_lock);
mlx5_esw_for_each_vport(esw, i, vport) {
err = mlx5_esw_offloads_rep_add(esw, vport);
--
2.44.0
* [PATCH net-next V3 4/7] net/mlx5: Lag, avoid LAG and representor lock cycles
2026-05-03 20:27 [PATCH net-next V3 0/7] net/mlx5: Improve representor lifecycle and late IB representor loading Tariq Toukan
` (2 preceding siblings ...)
2026-05-03 20:27 ` [PATCH net-next V3 3/7] net/mlx5: E-Switch, add representor lifecycle lock Tariq Toukan
@ 2026-05-03 20:27 ` Tariq Toukan
2026-05-03 20:27 ` [PATCH net-next V3 5/7] net/mlx5: E-Switch, serialize representor lifecycle Tariq Toukan
` (2 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Tariq Toukan @ 2026-05-03 20:27 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Leon Romanovsky, Jason Gunthorpe, Saeed Mahameed, Tariq Toukan,
Mark Bloch, Shay Drory, Or Har-Toov, Edward Srouji, Simon Horman,
Maher Sanalla, Parav Pandit, Patrisious Haddad, Kees Cook,
Gerd Bayer, Moshe Shemesh, Carolina Jubran, Cosmin Ratiu,
linux-rdma, linux-kernel, netdev, Gal Pressman, Dragos Tatulea
From: Mark Bloch <mbloch@nvidia.com>
The LAG shared-FDB and multiport E-Switch transitions rescan auxiliary
devices and reload IB representors while holding ldev->lock. Driver
bind/unbind paths may register or unregister E-Switch representor ops, and
representor load paths may enter LAG code, so holding ldev->lock across
those calls creates lock-order cycles with the E-Switch representor lock.
Keep the devcom component locked for the transition, but drop ldev->lock
before rescanning auxiliary devices or reloading IB representors. Mark the
LAG transition as in progress while the lock is dropped and assert the
devcom lock where the helper relies on it. This preserves LAG serialization
while avoiding ldev->lock nesting under E-Switch representor registration.
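A condensed sketch of the drop/retake pattern (all names from the diff
below):

    mlx5_lag_assert_locked_transition(ldev); /* ldev->lock + devcom held */
    ldev->mode_changes_in_progress++; /* keep PF membership stable */
    mutex_unlock(&ldev->lock);

    mlx5_rescan_drivers_locked(dev); /* may take reps_lock */

    mutex_lock(&ldev->lock);
    ldev->mode_changes_in_progress--;

With this, the only ordering between the two locks is
reps_lock -> ldev->lock, taken from representor callbacks.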
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../net/ethernet/mellanox/mlx5/core/lag/lag.c | 142 ++++++++++++++----
.../net/ethernet/mellanox/mlx5/core/lag/lag.h | 7 +-
.../ethernet/mellanox/mlx5/core/lag/mpesw.c | 10 +-
.../ethernet/mellanox/mlx5/core/lib/devcom.c | 8 +
.../ethernet/mellanox/mlx5/core/lib/devcom.h | 1 +
5 files changed, 134 insertions(+), 34 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index a474f970e056..e77f9931c39c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1063,37 +1063,99 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
return true;
}
-void mlx5_lag_add_devices(struct mlx5_lag *ldev)
+static void mlx5_lag_assert_locked_transition(struct mlx5_lag *ldev)
{
+ struct mlx5_devcom_comp_dev *devcom = NULL;
struct lag_func *pf;
int i;
- mlx5_ldev_for_each(i, 0, ldev) {
- pf = mlx5_lag_pf(ldev, i);
- if (pf->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
- continue;
+ lockdep_assert_held(&ldev->lock);
- pf->dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
- mlx5_rescan_drivers_locked(pf->dev);
+ i = mlx5_get_next_ldev_func(ldev, 0);
+ if (i < MLX5_MAX_PORTS) {
+ pf = mlx5_lag_pf(ldev, i);
+ devcom = pf->dev->priv.hca_devcom_comp;
}
+ mlx5_devcom_comp_assert_locked(devcom);
}
-void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
+static void mlx5_lag_drop_lock_for_reps(struct mlx5_lag *ldev)
+{
+ mlx5_lag_assert_locked_transition(ldev);
+
+ /* Keep PF membership stable while ldev->lock is dropped. Device add
+ * and remove paths observe mode_changes_in_progress and retry.
+ */
+ ldev->mode_changes_in_progress++;
+ mutex_unlock(&ldev->lock);
+}
+
+static void mlx5_lag_retake_lock_after_reps(struct mlx5_lag *ldev)
{
+ mutex_lock(&ldev->lock);
+ ldev->mode_changes_in_progress--;
+}
+
+void mlx5_lag_rescan_dev_locked(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev,
+ bool enable)
+{
+ if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
+ return;
+
+ if (enable)
+ dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ else
+ dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+
+ /* Auxiliary bus probe/remove can register or unregister representor
+ * callbacks and take reps_lock. Drop ldev->lock so the only ordering
+ * remains reps_lock -> ldev->lock from representor callbacks.
+ */
+ mlx5_lag_drop_lock_for_reps(ldev);
+ mlx5_rescan_drivers_locked(dev);
+ mlx5_lag_retake_lock_after_reps(ldev);
+}
+
+static void mlx5_lag_rescan_devices_locked(struct mlx5_lag *ldev, bool enable)
+{
+ struct mlx5_core_dev *devs[MLX5_MAX_PORTS];
struct lag_func *pf;
+ int num_devs = 0;
int i;
+ mlx5_lag_assert_locked_transition(ldev);
+
mlx5_ldev_for_each(i, 0, ldev) {
pf = mlx5_lag_pf(ldev, i);
if (pf->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
- pf->dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
- mlx5_rescan_drivers_locked(pf->dev);
+ if (enable)
+ pf->dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ else
+ pf->dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ devs[num_devs++] = pf->dev;
}
+
+ mlx5_lag_drop_lock_for_reps(ldev);
+ for (i = 0; i < num_devs; i++)
+ mlx5_rescan_drivers_locked(devs[i]);
+ mlx5_lag_retake_lock_after_reps(ldev);
}
-int mlx5_lag_reload_ib_reps(struct mlx5_lag *ldev, u32 flags, bool cont_on_fail)
+void mlx5_lag_add_devices(struct mlx5_lag *ldev)
+{
+ mlx5_lag_rescan_devices_locked(ldev, true);
+}
+
+void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
+{
+ mlx5_lag_rescan_devices_locked(ldev, false);
+}
+
+static int mlx5_lag_reload_ib_reps_unlocked(struct mlx5_lag *ldev, u32 flags,
+ bool cont_on_fail)
{
struct lag_func *pf;
int ret;
@@ -1105,7 +1167,9 @@ int mlx5_lag_reload_ib_reps(struct mlx5_lag *ldev, u32 flags, bool cont_on_fail)
struct mlx5_eswitch *esw;
esw = pf->dev->priv.eswitch;
+ mlx5_esw_reps_block(esw);
ret = mlx5_eswitch_reload_ib_reps(esw);
+ mlx5_esw_reps_unblock(esw);
if (ret && !cont_on_fail)
return ret;
}
@@ -1114,6 +1178,34 @@ int mlx5_lag_reload_ib_reps(struct mlx5_lag *ldev, u32 flags, bool cont_on_fail)
return 0;
}
+static int mlx5_lag_reload_ib_reps(struct mlx5_lag *ldev, u32 flags,
+ bool cont_on_fail)
+{
+ int ret;
+
+ /* The HCA devcom component lock serializes LAG mode transitions while
+ * ldev->lock is dropped here. Dropping ldev->lock is required because
+ * the reload takes the per-E-Switch reps_lock, and representor
+ * load/unload callbacks can re-enter LAG netdev add/remove and take
+ * ldev->lock. Keep the ordering reps_lock -> ldev->lock.
+ */
+ mlx5_lag_drop_lock_for_reps(ldev);
+ ret = mlx5_lag_reload_ib_reps_unlocked(ldev, flags, cont_on_fail);
+ mlx5_lag_retake_lock_after_reps(ldev);
+
+ return ret;
+}
+
+int mlx5_lag_reload_ib_reps_from_locked(struct mlx5_lag *ldev, u32 flags,
+ bool cont_on_fail)
+{
+ int ret;
+
+ ret = mlx5_lag_reload_ib_reps(ldev, flags, cont_on_fail);
+
+ return ret;
+}
+
void mlx5_disable_lag(struct mlx5_lag *ldev)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
@@ -1132,10 +1224,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
if (shared_fdb) {
mlx5_lag_remove_devices(ldev);
} else if (roce_lag) {
- if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) {
- dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
- mlx5_rescan_drivers_locked(dev0);
- }
+ mlx5_lag_rescan_dev_locked(ldev, dev0, false);
mlx5_ldev_for_each(i, 0, ldev) {
if (i == idx)
continue;
@@ -1151,8 +1240,9 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_lag_add_devices(ldev);
if (shared_fdb)
- mlx5_lag_reload_ib_reps(ldev, MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV,
- true);
+ mlx5_lag_reload_ib_reps_from_locked(ldev,
+ MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV,
+ true);
}
bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev)
@@ -1409,7 +1499,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
if (shared_fdb)
- mlx5_lag_reload_ib_reps(ldev, 0, true);
+ mlx5_lag_reload_ib_reps_from_locked(ldev, 0,
+ true);
return;
}
@@ -1417,8 +1508,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (roce_lag) {
struct mlx5_core_dev *dev;
- dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
- mlx5_rescan_drivers_locked(dev0);
+ mlx5_lag_rescan_dev_locked(ldev, dev0, true);
mlx5_ldev_for_each(i, 0, ldev) {
if (i == idx)
continue;
@@ -1427,15 +1517,15 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_nic_vport_enable_roce(dev);
}
} else if (shared_fdb) {
- dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
- mlx5_rescan_drivers_locked(dev0);
- err = mlx5_lag_reload_ib_reps(ldev, 0, false);
+ mlx5_lag_rescan_dev_locked(ldev, dev0, true);
+ err = mlx5_lag_reload_ib_reps_from_locked(ldev, 0,
+ false);
if (err) {
- dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
- mlx5_rescan_drivers_locked(dev0);
+ mlx5_lag_rescan_dev_locked(ldev, dev0, false);
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
- mlx5_lag_reload_ib_reps(ldev, 0, true);
+ mlx5_lag_reload_ib_reps_from_locked(ldev, 0,
+ true);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index daca8ebd5256..6afe7707d076 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -164,6 +164,9 @@ void mlx5_disable_lag(struct mlx5_lag *ldev);
void mlx5_lag_remove_devices(struct mlx5_lag *ldev);
int mlx5_deactivate_lag(struct mlx5_lag *ldev);
void mlx5_lag_add_devices(struct mlx5_lag *ldev);
+void mlx5_lag_rescan_dev_locked(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev,
+ bool enable);
struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev);
#ifdef CONFIG_MLX5_ESWITCH
@@ -199,6 +202,6 @@ int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx);
int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq);
int mlx5_lag_num_devs(struct mlx5_lag *ldev);
int mlx5_lag_num_netdevs(struct mlx5_lag *ldev);
-int mlx5_lag_reload_ib_reps(struct mlx5_lag *ldev, u32 flags,
- bool cont_on_fail);
+int mlx5_lag_reload_ib_reps_from_locked(struct mlx5_lag *ldev, u32 flags,
+ bool cont_on_fail);
#endif /* __MLX5_LAG_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index edcd06f3be7a..8a349f8fd823 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -100,9 +100,8 @@ static int mlx5_lag_enable_mpesw(struct mlx5_lag *ldev)
goto err_add_devices;
}
- dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
- mlx5_rescan_drivers_locked(dev0);
- err = mlx5_lag_reload_ib_reps(ldev, 0, false);
+ mlx5_lag_rescan_dev_locked(ldev, dev0, true);
+ err = mlx5_lag_reload_ib_reps_from_locked(ldev, 0, false);
if (err)
goto err_rescan_drivers;
@@ -111,12 +110,11 @@ static int mlx5_lag_enable_mpesw(struct mlx5_lag *ldev)
return 0;
err_rescan_drivers:
- dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
- mlx5_rescan_drivers_locked(dev0);
+ mlx5_lag_rescan_dev_locked(ldev, dev0, false);
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- mlx5_lag_reload_ib_reps(ldev, 0, true);
+ mlx5_lag_reload_ib_reps_from_locked(ldev, 0, true);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index 4b5ac2db55ce..d40c53193ea8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -3,6 +3,7 @@
#include <linux/mlx5/vport.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include "lib/devcom.h"
#include "lib/mlx5.h"
#include "mlx5_core.h"
@@ -438,3 +439,10 @@ int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
return 0;
return down_write_trylock(&devcom->comp->sem);
}
+
+void mlx5_devcom_comp_assert_locked(struct mlx5_devcom_comp_dev *devcom)
+{
+ if (!devcom)
+ return;
+ lockdep_assert_held_write(&devcom->comp->sem);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index 91e5ae529d5c..316052a85ca5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -75,5 +75,6 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom);
void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom);
int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom);
+void mlx5_devcom_comp_assert_locked(struct mlx5_devcom_comp_dev *devcom);
#endif /* __LIB_MLX5_DEVCOM_H__ */
--
2.44.0
* [PATCH net-next V3 5/7] net/mlx5: E-Switch, serialize representor lifecycle
2026-05-03 20:27 [PATCH net-next V3 0/7] net/mlx5: Improve representor lifecycle and late IB representor loading Tariq Toukan
` (3 preceding siblings ...)
2026-05-03 20:27 ` [PATCH net-next V3 4/7] net/mlx5: Lag, avoid LAG and representor lock cycles Tariq Toukan
@ 2026-05-03 20:27 ` Tariq Toukan
2026-05-03 20:27 ` [PATCH net-next V3 6/7] net/mlx5: E-Switch, unwind only newly loaded representor types Tariq Toukan
2026-05-03 20:27 ` [PATCH net-next V3 7/7] net/mlx5: E-Switch, load reps via work queue after registration Tariq Toukan
6 siblings, 0 replies; 8+ messages in thread
From: Tariq Toukan @ 2026-05-03 20:27 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Leon Romanovsky, Jason Gunthorpe, Saeed Mahameed, Tariq Toukan,
Mark Bloch, Shay Drory, Or Har-Toov, Edward Srouji, Simon Horman,
Maher Sanalla, Parav Pandit, Patrisious Haddad, Kees Cook,
Gerd Bayer, Moshe Shemesh, Carolina Jubran, Cosmin Ratiu,
linux-rdma, linux-kernel, netdev, Gal Pressman, Dragos Tatulea
From: Mark Bloch <mbloch@nvidia.com>
Representor callbacks can be registered and unregistered while the
E-Switch is already in switchdev mode, and the same E-Switch may also be
reconfigured by devlink, VF changes and SF changes. Serialize these paths
with the per-E-Switch representor mutex instead of relying on ad-hoc bit
state and wait queues.
Take the representor lock around the mode transition, VF/SF representor
changes and representor ops registration. Keep mode_lock and the
representor lock unnested: set the operation-in-progress flag under
mode_lock, drop mode_lock, and only then take the representor lock.
During mode changes, drop the representor lock around the auxiliary bus
rescan because driver bind/unbind may register or unregister
representor ops.
Split representor ops registration into locked public wrappers and blocked
internal helpers, clear the ops pointer on unregister, and add nested
wrappers for the shared-FDB master IB path that registers peer
representor ops while another E-Switch representor lock is already held.
On unregister, always call __unload_reps_all_vport() before marking reps
unregistered and clearing rep_ops. The per-representor state check makes
this a no-op for types that were not loaded, so unregister no longer has
to infer load state from esw->mode.
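For illustration, the nesting on the shared-FDB master IB path (peer_esw
here is shorthand for the peer device's priv.eswitch):

    /* master E-Switch: outer lock */
    mutex_lock(&esw->offloads.reps_lock);
    ...
    /* peer E-Switch, reached from the master's rep load/unload path */
    mutex_lock_nested(&peer_esw->offloads.reps_lock, SINGLE_DEPTH_NESTING);

The nested annotation tells lockdep that the two locks belong to
different E-Switch instances, so the nesting is not flagged as a
self-deadlock.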
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
drivers/infiniband/hw/mlx5/ib_rep.c | 6 +-
.../net/ethernet/mellanox/mlx5/core/eswitch.c | 10 ++
.../mellanox/mlx5/core/eswitch_offloads.c | 105 ++++++++++++++++--
.../ethernet/mellanox/mlx5/core/sf/devlink.c | 5 +
include/linux/mlx5/eswitch.h | 6 +
5 files changed, 120 insertions(+), 12 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 1709b628702e..65d8767d1830 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -262,9 +262,10 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
struct mlx5_core_dev *peer_mdev;
struct mlx5_eswitch *esw;
+ /* Called while the master E-Switch reps_lock is held. */
mlx5_lag_for_each_peer_mdev(mdev, peer_mdev, i) {
esw = peer_mdev->priv.eswitch;
- mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
+ mlx5_eswitch_unregister_vport_reps_nested(esw, REP_IB);
}
mlx5_ib_release_transport(mdev);
}
@@ -284,9 +285,10 @@ static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev)
struct mlx5_eswitch *esw;
int i;
+ /* Called while the master E-Switch reps_lock is held. */
mlx5_lag_for_each_peer_mdev(mdev, peer_mdev, i) {
esw = peer_mdev->priv.eswitch;
- mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
+ mlx5_eswitch_register_vport_reps_nested(esw, &rep_ops, REP_IB);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 66a773a99876..f70737437954 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1712,6 +1712,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
mlx5_lag_disable_change(esw->dev);
mlx5_eswitch_invalidate_wq(esw);
+ mlx5_esw_reps_block(esw);
if (!mlx5_esw_is_fdb_created(esw)) {
ret = mlx5_eswitch_enable_locked(esw, num_vfs);
@@ -1735,6 +1736,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
}
}
+ mlx5_esw_reps_unblock(esw);
+
if (toggle_lag)
mlx5_lag_enable_change(esw->dev);
@@ -1759,6 +1762,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);
mlx5_eswitch_invalidate_wq(esw);
+ mlx5_esw_reps_block(esw);
if (!mlx5_core_is_ecpf(esw->dev)) {
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
@@ -1770,6 +1774,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_eswitch_clear_ec_vf_vports_info(esw);
}
+ mlx5_esw_reps_unblock(esw);
+
if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
struct devlink *devlink = priv_to_devlink(esw->dev);
@@ -1825,7 +1831,11 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
devl_assert_locked(priv_to_devlink(esw->dev));
mlx5_lag_disable_change(esw->dev);
+
+ mlx5_esw_reps_block(esw);
mlx5_eswitch_disable_locked(esw);
+ mlx5_esw_reps_unblock(esw);
+
esw->mode = MLX5_ESWITCH_LEGACY;
mlx5_lag_enable_change(esw->dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index af7d0d58c048..a393efaa2fd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
+#include <linux/lockdep.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
@@ -2413,11 +2414,21 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
return err;
}
+static void mlx5_esw_assert_reps_locked(struct mlx5_eswitch *esw)
+{
+ lockdep_assert_held(&esw->offloads.reps_lock);
+}
+
void mlx5_esw_reps_block(struct mlx5_eswitch *esw)
{
mutex_lock(&esw->offloads.reps_lock);
}
+static void mlx5_esw_reps_block_nested(struct mlx5_eswitch *esw)
+{
+ mutex_lock_nested(&esw->offloads.reps_lock, SINGLE_DEPTH_NESTING);
+}
+
void mlx5_esw_reps_unblock(struct mlx5_eswitch *esw)
{
mutex_unlock(&esw->offloads.reps_lock);
@@ -2425,21 +2436,22 @@ void mlx5_esw_reps_unblock(struct mlx5_eswitch *esw)
static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode)
{
+ mlx5_esw_reps_unblock(esw);
mlx5_devcom_comp_lock(esw->dev->priv.hca_devcom_comp);
if (esw->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV ||
mlx5_core_mp_enabled(esw->dev)) {
esw->mode = mode;
- mlx5_rescan_drivers_locked(esw->dev);
- mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp);
- return;
+ goto out;
}
esw->dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(esw->dev);
esw->mode = mode;
esw->dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+out:
mlx5_rescan_drivers_locked(esw->dev);
mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp);
+ mlx5_esw_reps_block(esw);
}
static void mlx5_esw_fdb_drop_destroy(struct mlx5_eswitch *esw)
@@ -2776,6 +2788,8 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
+ mlx5_esw_assert_reps_locked(esw);
+
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
@@ -2786,6 +2800,8 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
+ mlx5_esw_assert_reps_locked(esw);
+
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_LOADED, REP_REGISTERED) == REP_LOADED) {
if (rep_type == REP_ETH)
@@ -3691,6 +3707,7 @@ static void esw_vfs_changed_event_handler(struct mlx5_eswitch *esw)
if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
goto free;
+ mlx5_esw_reps_block(esw);
/* Number of VFs can only change from "0 to x" or "x to 0". */
if (esw->esw_funcs.num_vfs > 0) {
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
@@ -3700,9 +3717,11 @@ static void esw_vfs_changed_event_handler(struct mlx5_eswitch *esw)
err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
- goto free;
+ goto unblock;
}
esw->esw_funcs.num_vfs = new_num_vfs;
+unblock:
+ mlx5_esw_reps_unblock(esw);
free:
kvfree(out);
}
@@ -4190,9 +4209,14 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
goto unlock;
}
+ /* Keep mode_lock and reps_lock unnested. The operation flag excludes
+ * mode users while mode_lock is dropped before taking reps_lock.
+ */
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
+ mlx5_esw_reps_block(esw);
+
if (mlx5_mode == MLX5_ESWITCH_OFFLOADS &&
!mlx5_devlink_netdev_netns_immutable_set(devlink, true)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -4225,6 +4249,10 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
skip:
if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && err)
mlx5_devlink_netdev_netns_immutable_set(devlink, false);
+ /* Reconfiguration is done; drop reps_lock before taking mode_lock again
+ * to clear the operation flag.
+ */
+ mlx5_esw_reps_unblock(esw);
down_write(&esw->mode_lock);
esw->eswitch_operation_in_progress = false;
unlock:
@@ -4498,9 +4526,10 @@ mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
return true;
}
-void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
- const struct mlx5_eswitch_rep_ops *ops,
- u8 rep_type)
+static void
+mlx5_eswitch_register_vport_reps_blocked(struct mlx5_eswitch *esw,
+ const struct mlx5_eswitch_rep_ops *ops,
+ u8 rep_type)
{
struct mlx5_eswitch_rep_data *rep_data;
struct mlx5_eswitch_rep *rep;
@@ -4515,21 +4544,77 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
}
}
}
+
+static void
+mlx5_eswitch_register_vport_reps_locked(struct mlx5_eswitch *esw,
+ const struct mlx5_eswitch_rep_ops *ops,
+ u8 rep_type, bool nested)
+{
+ if (nested)
+ mlx5_esw_reps_block_nested(esw);
+ else
+ mlx5_esw_reps_block(esw);
+ mlx5_eswitch_register_vport_reps_blocked(esw, ops, rep_type);
+ mlx5_esw_reps_unblock(esw);
+}
+
+void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
+ const struct mlx5_eswitch_rep_ops *ops,
+ u8 rep_type)
+{
+ mlx5_eswitch_register_vport_reps_locked(esw, ops, rep_type, false);
+}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
-void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
+void
+mlx5_eswitch_register_vport_reps_nested(struct mlx5_eswitch *esw,
+ const struct mlx5_eswitch_rep_ops *ops,
+ u8 rep_type)
+{
+ mlx5_eswitch_register_vport_reps_locked(esw, ops, rep_type, true);
+}
+EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps_nested);
+
+static void
+mlx5_eswitch_unregister_vport_reps_blocked(struct mlx5_eswitch *esw,
+ u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
- if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- __unload_reps_all_vport(esw, rep_type);
+ __unload_reps_all_vport(esw, rep_type);
mlx5_esw_for_each_rep(esw, i, rep)
atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
+
+ esw->offloads.rep_ops[rep_type] = NULL;
+}
+
+static void
+mlx5_eswitch_unregister_vport_reps_locked(struct mlx5_eswitch *esw,
+ u8 rep_type, bool nested)
+{
+ if (nested)
+ mlx5_esw_reps_block_nested(esw);
+ else
+ mlx5_esw_reps_block(esw);
+ mlx5_eswitch_unregister_vport_reps_blocked(esw, rep_type);
+ mlx5_esw_reps_unblock(esw);
+}
+
+void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
+{
+ mlx5_eswitch_unregister_vport_reps_locked(esw, rep_type, false);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
+void mlx5_eswitch_unregister_vport_reps_nested(struct mlx5_eswitch *esw,
+ u8 rep_type)
+{
+ mlx5_eswitch_unregister_vport_reps_locked(esw, rep_type, true);
+}
+EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps_nested);
+
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 8503e532f423..2fc69897e35b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -245,8 +245,10 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
if (IS_ERR(sf))
return PTR_ERR(sf);
+ mlx5_esw_reps_block(esw);
err = mlx5_eswitch_load_sf_vport(esw, sf->hw_fn_id, MLX5_VPORT_UC_ADDR_CHANGE,
&sf->dl_port, new_attr->controller, new_attr->sfnum);
+ mlx5_esw_reps_unblock(esw);
if (err)
goto esw_err;
*dl_port = &sf->dl_port.dl_port;
@@ -367,7 +369,10 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink,
struct mlx5_sf_table *table = dev->priv.sf_table;
struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
+ mlx5_esw_reps_block(dev->priv.eswitch);
mlx5_sf_del(table, sf);
+ mlx5_esw_reps_unblock(dev->priv.eswitch);
+
return 0;
}
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 3b29a3c6794d..a0dd162baa78 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -63,7 +63,13 @@ struct mlx5_eswitch_rep {
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
const struct mlx5_eswitch_rep_ops *ops,
u8 rep_type);
+void
+mlx5_eswitch_register_vport_reps_nested(struct mlx5_eswitch *esw,
+ const struct mlx5_eswitch_rep_ops *ops,
+ u8 rep_type);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
+void mlx5_eswitch_unregister_vport_reps_nested(struct mlx5_eswitch *esw,
+ u8 rep_type);
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
u16 vport_num,
u8 rep_type);
--
2.44.0
* [PATCH net-next V3 6/7] net/mlx5: E-Switch, unwind only newly loaded representor types
2026-05-03 20:27 [PATCH net-next V3 0/7] net/mlx5: Improve representor lifecycle and late IB representor loading Tariq Toukan
` (4 preceding siblings ...)
2026-05-03 20:27 ` [PATCH net-next V3 5/7] net/mlx5: E-Switch, serialize representor lifecycle Tariq Toukan
@ 2026-05-03 20:27 ` Tariq Toukan
2026-05-03 20:27 ` [PATCH net-next V3 7/7] net/mlx5: E-Switch, load reps via work queue after registration Tariq Toukan
6 siblings, 0 replies; 8+ messages in thread
From: Tariq Toukan @ 2026-05-03 20:27 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Leon Romanovsky, Jason Gunthorpe, Saeed Mahameed, Tariq Toukan,
Mark Bloch, Shay Drory, Or Har-Toov, Edward Srouji, Simon Horman,
Maher Sanalla, Parav Pandit, Patrisious Haddad, Kees Cook,
Gerd Bayer, Moshe Shemesh, Carolina Jubran, Cosmin Ratiu,
linux-rdma, linux-kernel, netdev, Gal Pressman, Dragos Tatulea
From: Mark Bloch <mbloch@nvidia.com>
__esw_offloads_load_rep() may return success without invoking the
representor load callback when the representor type is already loaded.
On a later load failure, mlx5_esw_offloads_rep_load() unconditionally
unwinds all previously iterated representor types. This can unload
representor types that were already loaded before this load attempt.
Track which representor types were actually loaded by the current call and
unwind only those on error. Also restore the representor state back to
REP_REGISTERED when the load callback itself fails.
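A condensed sketch of the unwind logic (from the diff below):

    unsigned long loaded = 0;

    for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
        err = __esw_offloads_load_rep(esw, rep, rep_type, &newly_loaded);
        if (err)
            goto err_reps;
        if (newly_loaded)
            loaded |= BIT(rep_type); /* loaded by this call only */
    }
    ...
err_reps:
    while (--rep_type >= 0)
        if (test_bit(rep_type, &loaded))
            __esw_offloads_unload_rep(esw, rep, rep_type);

For example, if REP_ETH is already loaded and the REP_IB load fails, the
unwind touches nothing, whereas previously REP_ETH was unloaded as well.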
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../mellanox/mlx5/core/eswitch_offloads.c | 38 ++++++++++++++-----
1 file changed, 29 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index a393efaa2fd7..8a7491e9f13d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2786,13 +2786,28 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
}
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep, u8 rep_type)
+ struct mlx5_eswitch_rep *rep,
+ u8 rep_type, bool *newly_loaded)
{
+ int err;
+
mlx5_esw_assert_reps_locked(esw);
+ if (newly_loaded)
+ *newly_loaded = false;
+
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
- REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
- return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
+ REP_REGISTERED, REP_LOADED) != REP_REGISTERED)
+ return 0;
+
+ err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
+ if (err) {
+ atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
+ return err;
+ }
+
+ if (newly_loaded)
+ *newly_loaded = true;
return 0;
}
@@ -2822,22 +2837,27 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
+ unsigned long loaded = 0;
+ bool newly_loaded;
int rep_type;
int err;
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
- err = __esw_offloads_load_rep(esw, rep, rep_type);
+ err = __esw_offloads_load_rep(esw, rep, rep_type,
+ &newly_loaded);
if (err)
goto err_reps;
+ if (newly_loaded)
+ loaded |= BIT(rep_type);
}
return 0;
err_reps:
- atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
- for (--rep_type; rep_type >= 0; rep_type--)
- __esw_offloads_unload_rep(esw, rep, rep_type);
+ while (--rep_type >= 0)
+ if (test_bit(rep_type, &loaded))
+ __esw_offloads_unload_rep(esw, rep, rep_type);
return err;
}
@@ -3591,13 +3611,13 @@ int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
return 0;
- ret = __esw_offloads_load_rep(esw, rep, REP_IB);
+ ret = __esw_offloads_load_rep(esw, rep, REP_IB, NULL);
if (ret)
return ret;
mlx5_esw_for_each_rep(esw, i, rep) {
if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
- __esw_offloads_load_rep(esw, rep, REP_IB);
+ __esw_offloads_load_rep(esw, rep, REP_IB, NULL);
}
return 0;
--
2.44.0
* [PATCH net-next V3 7/7] net/mlx5: E-Switch, load reps via work queue after registration
2026-05-03 20:27 [PATCH net-next V3 0/7] net/mlx5: Improve representor lifecycle and late IB representor loading Tariq Toukan
` (5 preceding siblings ...)
2026-05-03 20:27 ` [PATCH net-next V3 6/7] net/mlx5: E-Switch, unwind only newly loaded representor types Tariq Toukan
@ 2026-05-03 20:27 ` Tariq Toukan
6 siblings, 0 replies; 8+ messages in thread
From: Tariq Toukan @ 2026-05-03 20:27 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Leon Romanovsky, Jason Gunthorpe, Saeed Mahameed, Tariq Toukan,
Mark Bloch, Shay Drory, Or Har-Toov, Edward Srouji, Simon Horman,
Maher Sanalla, Parav Pandit, Patrisious Haddad, Kees Cook,
Gerd Bayer, Moshe Shemesh, Carolina Jubran, Cosmin Ratiu,
linux-rdma, linux-kernel, netdev, Gal Pressman, Dragos Tatulea
From: Mark Bloch <mbloch@nvidia.com>
mlx5_eswitch_register_vport_reps() only installs representor callbacks and
marks the rep type as registered. If the E-Switch is already in switchdev
mode, the newly registered rep type must then be loaded for already enabled
vports.
That load path needs to run under the devlink lock, which is not held by
the auxiliary driver registration context. Queue the reload to the E-Switch
workqueue, whose handler acquires the devlink lock, and load the relevant
representors from there.
Since representor registration runs from sleepable auxiliary-driver
context, queue the late reload with GFP_KERNEL. The functions-change
notifier path remains the GFP_ATOMIC user of mlx5_esw_add_work().
The unregister path is unchanged and still unloads representors
synchronously while tearing down the registered callbacks.
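A condensed sketch of the deferred load (names from the diff below;
esw_wq_handler is the existing workqueue handler, which takes the
devlink lock before invoking the queued function):

    /* registration context: sleepable, devlink lock not held */
    mlx5_esw_add_work(esw, mlx5_eswitch_reload_reps, GFP_KERNEL);

    /* later, from the E-Switch workqueue, under the devlink lock */
    static void mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
    {
        mlx5_esw_reps_block(esw);
        mlx5_eswitch_reload_reps_blocked(esw);
        mlx5_esw_reps_unblock(esw);
    }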
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../mellanox/mlx5/core/eswitch_offloads.c | 34 +++++++++++++++++++
1 file changed, 34 insertions(+)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 8a7491e9f13d..dea5647de548 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -4565,6 +4565,38 @@ mlx5_eswitch_register_vport_reps_blocked(struct mlx5_eswitch *esw,
}
}
+static void mlx5_eswitch_reload_reps_blocked(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return;
+
+ if (mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK))
+ return;
+
+ mlx5_esw_for_each_vport(esw, i, vport) {
+ if (!vport)
+ continue;
+ if (!vport->enabled)
+ continue;
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ continue;
+ if (!mlx5_eswitch_vport_has_rep(esw, vport->vport))
+ continue;
+
+ mlx5_esw_offloads_rep_load(esw, vport->vport);
+ }
+}
+
+static void mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+{
+ mlx5_esw_reps_block(esw);
+ mlx5_eswitch_reload_reps_blocked(esw);
+ mlx5_esw_reps_unblock(esw);
+}
+
static void
mlx5_eswitch_register_vport_reps_locked(struct mlx5_eswitch *esw,
const struct mlx5_eswitch_rep_ops *ops,
@@ -4576,6 +4608,8 @@ mlx5_eswitch_register_vport_reps_locked(struct mlx5_eswitch *esw,
mlx5_esw_reps_block(esw);
mlx5_eswitch_register_vport_reps_blocked(esw, ops, rep_type);
mlx5_esw_reps_unblock(esw);
+
+ mlx5_esw_add_work(esw, mlx5_eswitch_reload_reps, GFP_KERNEL);
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
--
2.44.0