From: Saeed Mahameed <saeed@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
netdev@vger.kernel.org, Tariq Toukan <tariqt@nvidia.com>,
Jiri Pirko <jiri@nvidia.com>, Shay Drory <shayd@nvidia.com>
Subject: [net-next 09/15] net/mlx5: Reduce number of vport lookups by passing vport pointer instead of index
Date: Tue, 22 Aug 2023 22:10:06 -0700
Message-ID: <20230823051012.162483-10-saeed@kernel.org>
In-Reply-To: <20230823051012.162483-1-saeed@kernel.org>
From: Jiri Pirko <jiri@nvidia.com>
During the devlink port init/cleanup and register/unregister calls, the
vport is looked up many times. Instead of passing vport_num as an
argument to these functions, pass the vport struct pointer directly and
avoid the repeated lookups.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
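Notes (not part of the commit message): the conversion is mechanical. As a
rough sketch of the before/after calling pattern — the function and type
names are the ones touched by the diff, but the bodies below are condensed
illustrations rather than the exact hunks:

/* Before: each helper re-resolved the vport from its index. */
int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);	/* repeated in every helper */
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	/* ... allocate and attach vport->dl_port ... */
	return 0;
}

/* After: the outermost caller resolves the vport once and hands the
 * pointer down through the init/enable/register helpers.
 */
static int mlx5_eswitch_load_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num,
					 enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	int err;

	vport = mlx5_eswitch_get_vport(esw, vport_num);	/* single lookup at the top */
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	err = mlx5_esw_offloads_init_pf_vf_rep(esw, vport);
	if (err)
		return err;

	err = mlx5_eswitch_load_vport(esw, vport, enabled_events);
	if (err)
		mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport);
	return err;
}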
.../mellanox/mlx5/core/esw/devlink_port.c | 47 +++--------
.../net/ethernet/mellanox/mlx5/core/eswitch.c | 79 +++++++++++--------
.../net/ethernet/mellanox/mlx5/core/eswitch.h | 30 +++----
.../mellanox/mlx5/core/eswitch_offloads.c | 30 +++----
4 files changed, 90 insertions(+), 96 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 35cf2739a2aa..2dc7b0bf38c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -54,18 +54,15 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
}
}
-int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
struct mlx5_devlink_port *dl_port;
- struct mlx5_vport *vport;
+ u16 vport_num = vport->vport;
if (!mlx5_esw_devlink_port_supported(esw, vport_num))
return 0;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
-
dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
if (!dl_port)
return -ENOMEM;
@@ -77,12 +74,10 @@ int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw, u16 vpor
return 0;
}
-void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport;
-
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport) || !vport->dl_port)
+ if (!vport->dl_port)
return;
kfree(vport->dl_port);
@@ -113,30 +108,18 @@ static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw
devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
}
-int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, u16 vport_num,
+int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
struct mlx5_devlink_port *dl_port,
u32 controller, u32 sfnum)
{
- struct mlx5_vport *vport;
-
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
-
mlx5_esw_offloads_sf_devlink_port_attrs_set(esw, &dl_port->dl_port, controller, sfnum);
vport->dl_port = dl_port;
return 0;
}
-void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
- struct mlx5_vport *vport;
-
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return;
-
vport->dl_port = NULL;
}
@@ -154,20 +137,16 @@ static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
#endif
};
-int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
struct mlx5_core_dev *dev = esw->dev;
const struct devlink_port_ops *ops;
struct mlx5_devlink_port *dl_port;
+ u16 vport_num = vport->vport;
unsigned int dl_port_index;
- struct mlx5_vport *vport;
struct devlink *devlink;
int err;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
-
dl_port = vport->dl_port;
if (!dl_port)
return 0;
@@ -196,13 +175,11 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
return err;
}
-void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
struct mlx5_devlink_port *dl_port;
- struct mlx5_vport *vport;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport) || !vport->dl_port)
+ if (!vport->dl_port)
return;
dl_port = vport->dl_port;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 044d0ba9fcf6..5368b33fde63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -882,16 +882,12 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
esw_vport_cleanup_acl(esw, vport);
}
-int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
enum mlx5_eswitch_vport_event enabled_events)
{
- struct mlx5_vport *vport;
+ u16 vport_num = vport->vport;
int ret;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
-
mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
@@ -912,7 +908,7 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
(!vport_num && mlx5_core_is_ecpf(esw->dev)))
vport->info.trusted = true;
- if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
if (ret)
@@ -939,15 +935,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
return ret;
}
-void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
- struct mlx5_vport *vport;
-
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return;
+ u16 vport_num = vport->vport;
mutex_lock(&esw->state_lock);
+
if (!vport->enabled)
goto done;
@@ -957,9 +950,9 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
/* Disable events from this vport */
if (MLX5_CAP_GEN(esw->dev, log_max_l2_table))
- arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
+ arm_vport_context_events_cmd(esw->dev, vport_num, 0);
- if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
mlx5_esw_vport_vhca_id_clear(esw, vport_num);
@@ -1068,82 +1061,104 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw)
}
}
-static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
+static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
enum mlx5_eswitch_vport_event enabled_events)
{
int err;
- err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
+ err = mlx5_esw_vport_enable(esw, vport, enabled_events);
if (err)
return err;
- err = mlx5_esw_offloads_load_rep(esw, vport_num);
+ err = mlx5_esw_offloads_load_rep(esw, vport);
if (err)
goto err_rep;
return err;
err_rep:
- mlx5_esw_vport_disable(esw, vport_num);
+ mlx5_esw_vport_disable(esw, vport);
return err;
}
-static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
+static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
- mlx5_esw_offloads_unload_rep(esw, vport_num);
- mlx5_esw_vport_disable(esw, vport_num);
+ mlx5_esw_offloads_unload_rep(esw, vport);
+ mlx5_esw_vport_disable(esw, vport);
}
static int mlx5_eswitch_load_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events)
{
+ struct mlx5_vport *vport;
int err;
- err = mlx5_esw_offloads_init_pf_vf_rep(esw, vport_num);
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ err = mlx5_esw_offloads_init_pf_vf_rep(esw, vport);
if (err)
return err;
- err = mlx5_eswitch_load_vport(esw, vport_num, enabled_events);
+ err = mlx5_eswitch_load_vport(esw, vport, enabled_events);
if (err)
goto err_load;
return 0;
err_load:
- mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport_num);
+ mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport);
return err;
}
static void mlx5_eswitch_unload_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
- mlx5_eswitch_unload_vport(esw, vport_num);
- mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport_num);
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return;
+
+ mlx5_eswitch_unload_vport(esw, vport);
+ mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport);
}
int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events,
struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum)
{
+ struct mlx5_vport *vport;
int err;
- err = mlx5_esw_offloads_init_sf_rep(esw, vport_num, dl_port, controller, sfnum);
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ err = mlx5_esw_offloads_init_sf_rep(esw, vport, dl_port, controller, sfnum);
if (err)
return err;
- err = mlx5_eswitch_load_vport(esw, vport_num, enabled_events);
+ err = mlx5_eswitch_load_vport(esw, vport, enabled_events);
if (err)
goto err_load;
return 0;
err_load:
- mlx5_esw_offloads_cleanup_sf_rep(esw, vport_num);
+ mlx5_esw_offloads_cleanup_sf_rep(esw, vport);
return err;
}
void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
- mlx5_eswitch_unload_vport(esw, vport_num);
- mlx5_esw_offloads_cleanup_sf_rep(esw, vport_num);
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return;
+
+ mlx5_eswitch_unload_vport(esw, vport);
+ mlx5_esw_offloads_cleanup_sf_rep(esw, vport);
}
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index b45013465738..8d03d4fe6eec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -694,9 +694,9 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
-int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
enum mlx5_eswitch_vport_event enabled_events);
-void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
@@ -734,16 +734,16 @@ void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
u16 vport,
struct mlx5_flow_spec *spec);
-int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
-int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, u16 vport_num,
+int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
struct mlx5_devlink_port *dl_port,
u32 controller, u32 sfnum);
-void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
-int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events,
@@ -754,16 +754,18 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
-int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
-int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, u16 vport_num,
+int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
struct mlx5_devlink_port *dl_port,
u32 controller, u32 sfnum);
-void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
-int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b7ece8767ffe..609605c42d6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2535,63 +2535,63 @@ static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
- return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport_num);
+ return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport);
}
-void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
- mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport_num);
+ mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport);
}
-int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, u16 vport_num,
+int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
struct mlx5_devlink_port *dl_port,
u32 controller, u32 sfnum)
{
- return mlx5_esw_offloads_sf_devlink_port_init(esw, vport_num, dl_port, controller, sfnum);
+ return mlx5_esw_offloads_sf_devlink_port_init(esw, vport, dl_port, controller, sfnum);
}
-void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
- mlx5_esw_offloads_sf_devlink_port_cleanup(esw, vport_num);
+ mlx5_esw_offloads_sf_devlink_port_cleanup(esw, vport);
}
-int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
int err;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
- err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
+ err = mlx5_esw_offloads_devlink_port_register(esw, vport);
if (err)
return err;
- err = mlx5_esw_offloads_rep_load(esw, vport_num);
+ err = mlx5_esw_offloads_rep_load(esw, vport->vport);
if (err)
goto load_err;
return err;
load_err:
- mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport);
return err;
}
-void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
- mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_offloads_rep_unload(esw, vport->vport);
- mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport);
}
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
--
2.41.0