* [PATCH net-next 1/8] net/mlx5: Use helper to parse host PF info
2026-05-10 5:34 [PATCH net-next 0/8] net/mlx5: Prepare eswitch infrastructure for satellite PF support Tariq Toukan
@ 2026-05-10 5:34 ` Tariq Toukan
2026-05-10 5:34 ` [PATCH net-next 2/8] net/mlx5: Use v1 response layout for query_esw_functions Tariq Toukan
` (6 subsequent siblings)
7 siblings, 0 replies; 9+ messages in thread
From: Tariq Toukan @ 2026-05-10 5:34 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Saeed Mahameed, Leon Romanovsky, Tariq Toukan, Mark Bloch,
Moshe Shemesh, Akiva Goldberger, netdev, linux-rdma, linux-kernel,
Gal Pressman, Dragos Tatulea
From: Moshe Shemesh <moshe@nvidia.com>
Add a helper mlx5_esw_get_host_pf_info() to retrieve host PF data from
the query_esw_functions command output, so callers no longer need to
parse the layout to obtain the required information.
Convert all callers of mlx5_esw_query_functions() to use the new helper,
preparing for upcoming support of the new op_mod that returns data in
the network_function_params layout.
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../net/ethernet/mellanox/mlx5/core/eswitch.c | 43 ++++++++++++++-----
.../net/ethernet/mellanox/mlx5/core/eswitch.h | 15 +++++++
.../mellanox/mlx5/core/eswitch_offloads.c | 34 ++++++---------
.../net/ethernet/mellanox/mlx5/core/sriov.c | 8 ++--
4 files changed, 62 insertions(+), 38 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 43c40353b2d8..861e79ddb489 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1083,10 +1083,36 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(err);
}
+static struct mlx5_esw_pf_info
+mlx5_esw_host_pf_from_host_params(const void *entry)
+{
+ return (struct mlx5_esw_pf_info) {
+ .pf_not_exist = MLX5_GET(host_params_context, entry,
+ host_pf_not_exist),
+ .pf_disabled = MLX5_GET(host_params_context, entry,
+ host_pf_disabled),
+ .num_of_vfs = MLX5_GET(host_params_context, entry,
+ host_num_of_vfs),
+ .total_vfs = MLX5_GET(host_params_context, entry,
+ host_total_vfs),
+ .host_number = MLX5_GET(host_params_context, entry,
+ host_number),
+ };
+}
+
+struct mlx5_esw_pf_info mlx5_esw_get_host_pf_info(const u32 *out)
+{
+ const void *entry;
+
+ entry = MLX5_ADDR_OF(query_esw_functions_out, out, net_function_params);
+
+ return mlx5_esw_host_pf_from_host_params(entry);
+}
+
static int mlx5_esw_host_functions_enabled_query(struct mlx5_eswitch *esw)
{
+ struct mlx5_esw_pf_info host_pf_info;
const u32 *query_host_out;
- void *host_params;
if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
return 0;
@@ -1095,11 +1121,8 @@ static int mlx5_esw_host_functions_enabled_query(struct mlx5_eswitch *esw)
if (IS_ERR(query_host_out))
return PTR_ERR(query_host_out);
- host_params = MLX5_ADDR_OF(query_esw_functions_out,
- query_host_out, net_function_params);
- esw->esw_funcs.host_funcs_disabled =
- MLX5_GET(host_params_context, host_params,
- host_pf_not_exist);
+ host_pf_info = mlx5_esw_get_host_pf_info(query_host_out);
+ esw->esw_funcs.host_funcs_disabled = host_pf_info.pf_not_exist;
kvfree(query_host_out);
return 0;
@@ -1523,7 +1546,7 @@ static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
static void
mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
- void *host_params;
+ struct mlx5_esw_pf_info host_pf_info;
const u32 *out;
if (num_vfs < 0)
@@ -1538,10 +1561,8 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
if (IS_ERR(out))
return;
- host_params = MLX5_ADDR_OF(query_esw_functions_out, out,
- net_function_params);
- esw->esw_funcs.num_vfs = MLX5_GET(host_params_context, host_params,
- host_num_of_vfs);
+ host_pf_info = mlx5_esw_get_host_pf_info(out);
+ esw->esw_funcs.num_vfs = host_pf_info.num_of_vfs;
if (mlx5_core_ec_sriov_enabled(esw->dev))
esw->esw_funcs.num_ec_vfs = num_vfs;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 291271afa96c..cfaae59a6e7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -71,6 +71,14 @@ struct mlx5_mapped_obj {
};
};
+struct mlx5_esw_pf_info {
+ bool pf_not_exist;
+ bool pf_disabled;
+ u16 num_of_vfs;
+ u16 total_vfs;
+ u16 host_number;
+};
+
#ifdef CONFIG_MLX5_ESWITCH
#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15
@@ -649,6 +657,7 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1);
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
+struct mlx5_esw_pf_info mlx5_esw_get_host_pf_info(const u32 *out);
int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev);
int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev);
@@ -976,6 +985,12 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct mlx5_esw_pf_info
+mlx5_esw_get_host_pf_info(const u32 *out)
+{
+ return (struct mlx5_esw_pf_info) {};
+}
+
static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d95af87a4f5f..217c2fe6b690 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3708,8 +3708,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
static void esw_vfs_changed_event_handler(struct mlx5_eswitch *esw)
{
- bool host_pf_disabled;
- void *host_params;
+ struct mlx5_esw_pf_info host_pf_info;
u16 new_num_vfs;
const u32 *out;
@@ -3717,14 +3716,10 @@ static void esw_vfs_changed_event_handler(struct mlx5_eswitch *esw)
if (IS_ERR(out))
return;
- host_params = MLX5_ADDR_OF(query_esw_functions_out, out,
- net_function_params);
- new_num_vfs = MLX5_GET(host_params_context, host_params,
- host_num_of_vfs);
- host_pf_disabled = MLX5_GET(host_params_context, host_params,
- host_pf_disabled);
+ host_pf_info = mlx5_esw_get_host_pf_info(out);
+ new_num_vfs = host_pf_info.num_of_vfs;
- if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
+ if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_info.pf_disabled)
goto free;
mlx5_esw_reps_block(esw);
@@ -3826,8 +3821,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb,
static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
{
+ struct mlx5_esw_pf_info host_pf_info;
const u32 *query_host_out;
- void *host_params;
if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
return 0;
@@ -3837,10 +3832,8 @@ static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
return PTR_ERR(query_host_out);
/* Mark non local controller with non zero controller number. */
- host_params = MLX5_ADDR_OF(query_esw_functions_out,
- query_host_out, net_function_params);
- esw->offloads.host_number = MLX5_GET(host_params_context,
- host_params, host_number);
+ host_pf_info = mlx5_esw_get_host_pf_info(query_host_out);
+ esw->offloads.host_number = host_pf_info.host_number;
kvfree(query_host_out);
return 0;
}
@@ -4980,9 +4973,8 @@ int mlx5_devlink_pf_port_fn_state_get(struct devlink_port *port,
struct netlink_ext_ack *extack)
{
struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ struct mlx5_esw_pf_info host_pf_info;
const u32 *query_out;
- void *host_params;
- bool pf_disabled;
if (vport->vport != MLX5_VPORT_HOST_PF) {
NL_SET_ERR_MSG_MOD(extack, "State get is not supported for VF");
@@ -4996,13 +4988,11 @@ int mlx5_devlink_pf_port_fn_state_get(struct devlink_port *port,
if (IS_ERR(query_out))
return PTR_ERR(query_out);
- host_params = MLX5_ADDR_OF(query_esw_functions_out, query_out,
- net_function_params);
- pf_disabled = MLX5_GET(host_params_context, host_params,
- host_pf_disabled);
+ host_pf_info = mlx5_esw_get_host_pf_info(query_out);
- *opstate = pf_disabled ? DEVLINK_PORT_FN_OPSTATE_DETACHED :
- DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+ *opstate = host_pf_info.pf_disabled ?
+ DEVLINK_PORT_FN_OPSTATE_DETACHED :
+ DEVLINK_PORT_FN_OPSTATE_ATTACHED;
kvfree(query_out);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 6eb6026eadd6..79f76c456d72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -273,8 +273,8 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev)
static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
{
+ struct mlx5_esw_pf_info host_pf_info;
u16 host_total_vfs;
- void *host_params;
const u32 *out;
if (mlx5_core_is_ecpf_esw_manager(dev)) {
@@ -285,10 +285,8 @@ static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
*/
if (IS_ERR(out))
goto done;
- host_params = MLX5_ADDR_OF(query_esw_functions_out, out,
- net_function_params);
- host_total_vfs = MLX5_GET(host_params_context, host_params,
- host_total_vfs);
+ host_pf_info = mlx5_esw_get_host_pf_info(out);
+ host_total_vfs = host_pf_info.total_vfs;
kvfree(out);
return host_total_vfs;
}
--
2.44.0
^ permalink raw reply related [flat|nested] 9+ messages in thread

* [PATCH net-next 2/8] net/mlx5: Use v1 response layout for query_esw_functions
2026-05-10 5:34 [PATCH net-next 0/8] net/mlx5: Prepare eswitch infrastructure for satellite PF support Tariq Toukan
2026-05-10 5:34 ` [PATCH net-next 1/8] net/mlx5: Use helper to parse host PF info Tariq Toukan
@ 2026-05-10 5:34 ` Tariq Toukan
2026-05-10 5:34 ` [PATCH net-next 3/8] net/mlx5: Use mlx5_eswitch_is_vf_vport() for IPsec VF checks Tariq Toukan
` (5 subsequent siblings)
7 siblings, 0 replies; 9+ messages in thread
From: Tariq Toukan @ 2026-05-10 5:34 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Saeed Mahameed, Leon Romanovsky, Tariq Toukan, Mark Bloch,
Moshe Shemesh, Akiva Goldberger, netdev, linux-rdma, linux-kernel,
Gal Pressman, Dragos Tatulea
From: Moshe Shemesh <moshe@nvidia.com>
Use the v1 response layout for the query_esw_functions command when
supported by the device. When query_host_net_function_v1 capability is
set, use MLX5_QUERY_ESW_FUNC_OP_MOD_LAYOUT_V1 to retrieve parameters
for multiple network functions, allocating the output buffer according
to query_host_net_function_num_max. Validate that firmware does not
return more entries than the allocated buffer.
This change does not introduce new functionality, but enables the
existing mlx5_esw_query_functions() callers to retrieve host PF
information with the new layout as well. The mlx5_esw_get_host_pf_info()
helper abstracts parsing the command output in both legacy and new
formats, so callers do not need to handle the different layouts.
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../net/ethernet/mellanox/mlx5/core/eswitch.c | 88 +++++++++++++++++--
.../net/ethernet/mellanox/mlx5/core/eswitch.h | 5 +-
.../mellanox/mlx5/core/eswitch_offloads.c | 6 +-
.../net/ethernet/mellanox/mlx5/core/sriov.c | 2 +-
4 files changed, 89 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 861e79ddb489..8b62dde7eb70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1063,11 +1063,28 @@ static int eswitch_vport_event(struct notifier_block *nb,
*/
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
- int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
+ bool net_func_v1 = MLX5_CAP_GEN(dev, query_host_net_function_v1);
u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
+ int alloc_entries;
+ int outlen;
u32 *out;
int err;
+ if (net_func_v1) {
+ alloc_entries = MLX5_CAP_GEN(dev,
+ query_host_net_function_num_max);
+ alloc_entries = max(alloc_entries, 1);
+ MLX5_SET(query_esw_functions_in, in, op_mod,
+ MLX5_QUERY_ESW_FUNC_OP_MOD_LAYOUT_V1);
+ outlen = MLX5_BYTE_OFF(query_esw_functions_out,
+ net_function_params) +
+ alloc_entries * MLX5_UN_SZ_BYTES(net_function_params);
+ outlen = max_t(int, outlen,
+ MLX5_ST_SZ_BYTES(query_esw_functions_out));
+ } else {
+ outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
+ }
+
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return ERR_PTR(-ENOMEM);
@@ -1076,9 +1093,25 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
- if (!err)
- return out;
+ if (err)
+ goto free;
+
+ if (net_func_v1) {
+ int num_entries;
+
+ num_entries = MLX5_GET(query_esw_functions_out, out,
+ net_function_num);
+ if (num_entries > alloc_entries) {
+ mlx5_core_warn(dev, "Got %d entries, max expected %d\n",
+ num_entries, alloc_entries);
+ err = -EINVAL;
+ goto free;
+ }
+ }
+
+ return out;
+free:
kvfree(out);
return ERR_PTR(err);
}
@@ -1100,12 +1133,55 @@ mlx5_esw_host_pf_from_host_params(const void *entry)
};
}
-struct mlx5_esw_pf_info mlx5_esw_get_host_pf_info(const u32 *out)
+static struct mlx5_esw_pf_info
+mlx5_esw_host_pf_from_net_func_params(const u8 *entry, int num_entries)
+{
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ int pf_type, state;
+
+ pf_type = MLX5_GET(network_function_params, entry, pci_pf_type);
+ if (pf_type != MLX5_PCI_PF_TYPE_EXTERNAL_HOST_PF) {
+ entry += MLX5_UN_SZ_BYTES(net_function_params);
+ continue;
+ }
+
+ state = MLX5_GET(network_function_params, entry, vhca_state);
+
+ return (struct mlx5_esw_pf_info) {
+ .pf_disabled = state != MLX5_VHCA_STATE_IN_USE,
+ .num_of_vfs = MLX5_GET(network_function_params,
+ entry, pci_num_vfs),
+ .total_vfs = MLX5_GET(network_function_params,
+ entry, pci_total_vfs),
+ .host_number = MLX5_GET(network_function_params,
+ entry, host_number),
+ };
+ }
+
+ /* No external host PF entry found */
+ return (struct mlx5_esw_pf_info) {
+ .pf_not_exist = true,
+ .pf_disabled = true,
+ };
+}
+
+struct mlx5_esw_pf_info
+mlx5_esw_get_host_pf_info(struct mlx5_core_dev *dev, const u32 *out)
{
const void *entry;
entry = MLX5_ADDR_OF(query_esw_functions_out, out, net_function_params);
+ if (MLX5_CAP_GEN(dev, query_host_net_function_v1)) {
+ int num_entries = MLX5_GET(query_esw_functions_out, out,
+ net_function_num);
+
+ return mlx5_esw_host_pf_from_net_func_params(entry,
+ num_entries);
+ }
+
return mlx5_esw_host_pf_from_host_params(entry);
}
@@ -1121,7 +1197,7 @@ static int mlx5_esw_host_functions_enabled_query(struct mlx5_eswitch *esw)
if (IS_ERR(query_host_out))
return PTR_ERR(query_host_out);
- host_pf_info = mlx5_esw_get_host_pf_info(query_host_out);
+ host_pf_info = mlx5_esw_get_host_pf_info(esw->dev, query_host_out);
esw->esw_funcs.host_funcs_disabled = host_pf_info.pf_not_exist;
kvfree(query_host_out);
@@ -1561,7 +1637,7 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
if (IS_ERR(out))
return;
- host_pf_info = mlx5_esw_get_host_pf_info(out);
+ host_pf_info = mlx5_esw_get_host_pf_info(esw->dev, out);
esw->esw_funcs.num_vfs = host_pf_info.num_of_vfs;
if (mlx5_core_ec_sriov_enabled(esw->dev))
esw->esw_funcs.num_ec_vfs = num_vfs;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index cfaae59a6e7c..a5f832ed2251 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -657,7 +657,8 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1);
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
-struct mlx5_esw_pf_info mlx5_esw_get_host_pf_info(const u32 *out);
+struct mlx5_esw_pf_info mlx5_esw_get_host_pf_info(struct mlx5_core_dev *dev,
+ const u32 *out);
int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev);
int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev);
@@ -986,7 +987,7 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
}
static inline struct mlx5_esw_pf_info
-mlx5_esw_get_host_pf_info(const u32 *out)
+mlx5_esw_get_host_pf_info(struct mlx5_core_dev *dev, const u32 *out)
{
return (struct mlx5_esw_pf_info) {};
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 217c2fe6b690..acbc37b05308 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3716,7 +3716,7 @@ static void esw_vfs_changed_event_handler(struct mlx5_eswitch *esw)
if (IS_ERR(out))
return;
- host_pf_info = mlx5_esw_get_host_pf_info(out);
+ host_pf_info = mlx5_esw_get_host_pf_info(esw->dev, out);
new_num_vfs = host_pf_info.num_of_vfs;
if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_info.pf_disabled)
@@ -3832,7 +3832,7 @@ static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
return PTR_ERR(query_host_out);
/* Mark non local controller with non zero controller number. */
- host_pf_info = mlx5_esw_get_host_pf_info(query_host_out);
+ host_pf_info = mlx5_esw_get_host_pf_info(esw->dev, query_host_out);
esw->offloads.host_number = host_pf_info.host_number;
kvfree(query_host_out);
return 0;
@@ -4988,7 +4988,7 @@ int mlx5_devlink_pf_port_fn_state_get(struct devlink_port *port,
if (IS_ERR(query_out))
return PTR_ERR(query_out);
- host_pf_info = mlx5_esw_get_host_pf_info(query_out);
+ host_pf_info = mlx5_esw_get_host_pf_info(vport->dev, query_out);
*opstate = host_pf_info.pf_disabled ?
DEVLINK_PORT_FN_OPSTATE_DETACHED :
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 79f76c456d72..0770b5d99c5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -285,7 +285,7 @@ static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
*/
if (IS_ERR(out))
goto done;
- host_pf_info = mlx5_esw_get_host_pf_info(out);
+ host_pf_info = mlx5_esw_get_host_pf_info(dev, out);
host_total_vfs = host_pf_info.total_vfs;
kvfree(out);
return host_total_vfs;
--
2.44.0
^ permalink raw reply related [flat|nested] 9+ messages in thread

* [PATCH net-next 3/8] net/mlx5: Use mlx5_eswitch_is_vf_vport() for IPsec VF checks
2026-05-10 5:34 [PATCH net-next 0/8] net/mlx5: Prepare eswitch infrastructure for satellite PF support Tariq Toukan
2026-05-10 5:34 ` [PATCH net-next 1/8] net/mlx5: Use helper to parse host PF info Tariq Toukan
2026-05-10 5:34 ` [PATCH net-next 2/8] net/mlx5: Use v1 response layout for query_esw_functions Tariq Toukan
@ 2026-05-10 5:34 ` Tariq Toukan
2026-05-10 5:34 ` [PATCH net-next 4/8] net/mlx5: Switch vport HCA cap helpers to kvzalloc Tariq Toukan
` (4 subsequent siblings)
7 siblings, 0 replies; 9+ messages in thread
From: Tariq Toukan @ 2026-05-10 5:34 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Saeed Mahameed, Leon Romanovsky, Tariq Toukan, Mark Bloch,
Moshe Shemesh, Akiva Goldberger, netdev, linux-rdma, linux-kernel,
Gal Pressman, Dragos Tatulea
From: Moshe Shemesh <moshe@nvidia.com>
IPsec eswitch offload operations and the enabled_ipsec_vf_count counter
are intended for VF vports only. Replace the MLX5_VPORT_HOST_PF checks
with mlx5_eswitch_is_vf_vport() to properly identify VF vports, as
preparation for adding another type of PF vports.
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c | 2 +-
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
index 4811b60ea430..b830ccd91e62 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
@@ -209,7 +209,7 @@ static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5
struct mlx5_core_dev *dev = esw->dev;
int err;
- if (vport->vport == MLX5_VPORT_HOST_PF)
+ if (!mlx5_eswitch_is_vf_vport(esw, vport->vport))
return -EOPNOTSUPP;
if (type == MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 8b62dde7eb70..9a7de7c9a667 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -958,7 +958,7 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
/* Sync with current vport context */
vport->enabled_events = enabled_events;
vport->enabled = true;
- if (vport->vport != MLX5_VPORT_HOST_PF &&
+ if (mlx5_eswitch_is_vf_vport(esw, vport_num) &&
(vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
esw->enabled_ipsec_vf_count++;
@@ -1020,7 +1020,7 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
mlx5_esw_vport_vhca_id_unmap(esw, vport);
}
- if (vport->vport != MLX5_VPORT_HOST_PF &&
+ if (mlx5_eswitch_is_vf_vport(esw, vport_num) &&
(vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
esw->enabled_ipsec_vf_count--;
--
2.44.0
^ permalink raw reply related [flat|nested] 9+ messages in thread

* [PATCH net-next 4/8] net/mlx5: Switch vport HCA cap helpers to kvzalloc
2026-05-10 5:34 [PATCH net-next 0/8] net/mlx5: Prepare eswitch infrastructure for satellite PF support Tariq Toukan
` (2 preceding siblings ...)
2026-05-10 5:34 ` [PATCH net-next 3/8] net/mlx5: Use mlx5_eswitch_is_vf_vport() for IPsec VF checks Tariq Toukan
@ 2026-05-10 5:34 ` Tariq Toukan
2026-05-10 5:34 ` [PATCH net-next 5/8] net/mlx5: Add mlx5_vport_set_other_func_general_cap macro Tariq Toukan
` (3 subsequent siblings)
7 siblings, 0 replies; 9+ messages in thread
From: Tariq Toukan @ 2026-05-10 5:34 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Saeed Mahameed, Leon Romanovsky, Tariq Toukan, Mark Bloch,
Moshe Shemesh, Akiva Goldberger, netdev, linux-rdma, linux-kernel,
Gal Pressman, Dragos Tatulea
From: Moshe Shemesh <moshe@nvidia.com>
mlx5_vport_set_other_func_cap() and mlx5_vport_get_vhca_id() allocate
command buffers that embed the HCA capability union, exceeding 4KiB.
Use kvzalloc/kvfree so the allocation can fall back to vmalloc when
contiguous memory is scarce.
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
drivers/net/ethernet/mellanox/mlx5/core/vport.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 4effe37fd455..f8e6b1ab7c5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1336,7 +1336,7 @@ int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id)
if (mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport, vhca_id))
return 0;
- query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ query_ctx = kvzalloc(query_out_sz, GFP_KERNEL);
if (!query_ctx)
return -ENOMEM;
@@ -1348,7 +1348,7 @@ int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id)
*vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
out_free:
- kfree(query_ctx);
+ kvfree(query_ctx);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_vhca_id);
@@ -1363,7 +1363,7 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
void *set_ctx;
int ret;
- set_ctx = kzalloc(set_sz, GFP_KERNEL);
+ set_ctx = kvzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
return -ENOMEM;
@@ -1392,6 +1392,6 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
MLX5_SET(set_hca_cap_in, set_ctx, function_id, function_id);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, set_ctx);
- kfree(set_ctx);
+ kvfree(set_ctx);
return ret;
}
--
2.44.0
^ permalink raw reply related [flat|nested] 9+ messages in thread

* [PATCH net-next 5/8] net/mlx5: Add mlx5_vport_set_other_func_general_cap macro
2026-05-10 5:34 [PATCH net-next 0/8] net/mlx5: Prepare eswitch infrastructure for satellite PF support Tariq Toukan
` (3 preceding siblings ...)
2026-05-10 5:34 ` [PATCH net-next 4/8] net/mlx5: Switch vport HCA cap helpers to kvzalloc Tariq Toukan
@ 2026-05-10 5:34 ` Tariq Toukan
2026-05-10 5:34 ` [PATCH net-next 6/8] net/mlx5: Refactor mlx5_set_msix_vec_count() SET_HCA_CAP Tariq Toukan
` (2 subsequent siblings)
7 siblings, 0 replies; 9+ messages in thread
From: Tariq Toukan @ 2026-05-10 5:34 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Saeed Mahameed, Leon Romanovsky, Tariq Toukan, Mark Bloch,
Moshe Shemesh, Akiva Goldberger, netdev, linux-rdma, linux-kernel,
Gal Pressman, Dragos Tatulea
From: Moshe Shemesh <moshe@nvidia.com>
Add mlx5_vport_set_other_func_general_cap() convenience macro, symmetric
to the existing mlx5_vport_get_other_func_general_cap(), and use it in
mlx5_devlink_port_fn_roce_set().
No functional change in this patch.
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 4 ++--
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 4 ++++
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index acbc37b05308..b06b10d443bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -4951,8 +4951,8 @@ int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);
- err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+ err = mlx5_vport_set_other_func_general_cap(esw->dev, hca_caps,
+ vport_num);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
goto out_free;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index d70907f499a9..2eba141bd521 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -457,6 +457,10 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
+#define mlx5_vport_set_other_func_general_cap(dev, hca_cap, vport) \
+ mlx5_vport_set_other_func_cap(dev, hca_cap, vport, \
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE)
+
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
--
2.44.0
^ permalink raw reply related [flat|nested] 9+ messages in thread

* [PATCH net-next 6/8] net/mlx5: Refactor mlx5_set_msix_vec_count() SET_HCA_CAP
2026-05-10 5:34 [PATCH net-next 0/8] net/mlx5: Prepare eswitch infrastructure for satellite PF support Tariq Toukan
` (4 preceding siblings ...)
2026-05-10 5:34 ` [PATCH net-next 5/8] net/mlx5: Add mlx5_vport_set_other_func_general_cap macro Tariq Toukan
@ 2026-05-10 5:34 ` Tariq Toukan
2026-05-10 5:34 ` [PATCH net-next 7/8] net/mlx5: Use vport helper for IPsec eswitch set caps Tariq Toukan
2026-05-10 5:34 ` [PATCH net-next 8/8] net/mlx5: Generalize enable/disable HCA for any PF vport Tariq Toukan
7 siblings, 0 replies; 9+ messages in thread
From: Tariq Toukan @ 2026-05-10 5:34 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Saeed Mahameed, Leon Romanovsky, Tariq Toukan, Mark Bloch,
Moshe Shemesh, Akiva Goldberger, netdev, linux-rdma, linux-kernel,
Gal Pressman, Dragos Tatulea
From: Moshe Shemesh <moshe@nvidia.com>
Use mlx5_vport_set_other_func_general_cap() instead of open-coding the
SET_HCA_CAP command. This removes redundant buffer allocation and
ensures consistent use of vport-based function addressing.
mlx5_vport_set_other_func_general_cap() supports both function_id and
vhca_id based addressing, so this also enables SET_HCA_CAP for vhca_id
indexed functions which was not supported before.
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../net/ethernet/mellanox/mlx5/core/pci_irq.c | 27 +++++--------------
1 file changed, 7 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index e051b9a939ee..0f5b8bc7861e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -87,9 +87,8 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
int msix_vec_count)
{
int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
- void *hca_cap = NULL, *query_cap = NULL, *cap;
int num_vf_msix, min_msix, max_msix;
+ void *query_cap, *hca_caps;
bool ec_vf_function;
int vport;
int ret;
@@ -111,11 +110,8 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
return -EOVERFLOW;
query_cap = kvzalloc(query_sz, GFP_KERNEL);
- hca_cap = kvzalloc(set_sz, GFP_KERNEL);
- if (!hca_cap || !query_cap) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!query_cap)
+ return -ENOMEM;
ec_vf_function = mlx5_core_ec_sriov_enabled(dev);
vport = mlx5_core_func_to_vport(dev, function_id, ec_vf_function);
@@ -123,21 +119,12 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
if (ret)
goto out;
- cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
- memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
- MLX5_UN_SZ_BYTES(hca_cap_union));
- MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);
-
- MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
- MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
- MLX5_SET(set_hca_cap_in, hca_cap, ec_vf_function, ec_vf_function);
- MLX5_SET(set_hca_cap_in, hca_cap, function_id, function_id);
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ MLX5_SET(cmd_hca_cap, hca_caps, dynamic_msix_table_size,
+ msix_vec_count);
- MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
- ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+ ret = mlx5_vport_set_other_func_general_cap(dev, hca_caps, vport);
out:
- kvfree(hca_cap);
kvfree(query_cap);
return ret;
}
--
2.44.0
^ permalink raw reply related [flat|nested] 9+ messages in thread

* [PATCH net-next 7/8] net/mlx5: Use vport helper for IPsec eswitch set caps
2026-05-10 5:34 [PATCH net-next 0/8] net/mlx5: Prepare eswitch infrastructure for satellite PF support Tariq Toukan
` (5 preceding siblings ...)
2026-05-10 5:34 ` [PATCH net-next 6/8] net/mlx5: Refactor mlx5_set_msix_vec_count() SET_HCA_CAP Tariq Toukan
@ 2026-05-10 5:34 ` Tariq Toukan
2026-05-10 5:34 ` [PATCH net-next 8/8] net/mlx5: Generalize enable/disable HCA for any PF vport Tariq Toukan
7 siblings, 0 replies; 9+ messages in thread
From: Tariq Toukan @ 2026-05-10 5:34 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Saeed Mahameed, Leon Romanovsky, Tariq Toukan, Mark Bloch,
Moshe Shemesh, Akiva Goldberger, netdev, linux-rdma, linux-kernel,
Gal Pressman, Dragos Tatulea
From: Moshe Shemesh <moshe@nvidia.com>
Use mlx5_vport_set_other_func_cap() and
mlx5_vport_set_other_func_general_cap() in the IPsec eswitch functions
instead of open-coding the SET_HCA_CAP command. This removes redundant
buffer allocation and boilerplate, and also enables vhca_id based
addressing when supported.
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../ethernet/mellanox/mlx5/core/esw/ipsec.c | 81 ++++++-------------
1 file changed, 23 insertions(+), 58 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
index b830ccd91e62..2b5765ab60d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
@@ -81,38 +81,25 @@ int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *
static int esw_ipsec_vf_set_generic(struct mlx5_core_dev *dev, u16 vport_num, bool ipsec_ofld)
{
int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
- void *hca_cap, *query_cap, *cap;
+ void *query_cap, *hca_caps;
int ret;
if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
return -EOPNOTSUPP;
query_cap = kvzalloc(query_sz, GFP_KERNEL);
- hca_cap = kvzalloc(set_sz, GFP_KERNEL);
- if (!hca_cap || !query_cap) {
- ret = -ENOMEM;
- goto free;
- }
+ if (!query_cap)
+ return -ENOMEM;
ret = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
if (ret)
goto free;
- cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
- memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
- MLX5_UN_SZ_BYTES(hca_cap_union));
- MLX5_SET(cmd_hca_cap, cap, ipsec_offload, ipsec_ofld);
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ MLX5_SET(cmd_hca_cap, hca_caps, ipsec_offload, ipsec_ofld);
- MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
- MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
- MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
-
- MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
- ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+ ret = mlx5_vport_set_other_func_general_cap(dev, hca_caps, vport_num);
free:
- kvfree(hca_cap);
kvfree(query_cap);
return ret;
}
@@ -121,49 +108,37 @@ static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport
bool enable, enum esw_vport_ipsec_offload type)
{
int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
- void *hca_cap, *query_cap, *cap;
+ void *query_cap, *hca_caps;
int ret;
if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
return -EOPNOTSUPP;
query_cap = kvzalloc(query_sz, GFP_KERNEL);
- hca_cap = kvzalloc(set_sz, GFP_KERNEL);
- if (!hca_cap || !query_cap) {
- ret = -ENOMEM;
- goto free;
- }
+ if (!query_cap)
+ return -ENOMEM;
ret = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
if (ret)
goto free;
- cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
- memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
- MLX5_UN_SZ_BYTES(hca_cap_union));
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
switch (type) {
case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
- MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable);
+ MLX5_SET(ipsec_cap, hca_caps, ipsec_crypto_offload, enable);
break;
case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
- MLX5_SET(ipsec_cap, cap, ipsec_full_offload, enable);
+ MLX5_SET(ipsec_cap, hca_caps, ipsec_full_offload, enable);
break;
default:
ret = -EOPNOTSUPP;
goto free;
}
- MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
- MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
- MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport->vport);
-
- MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_IPSEC << 1);
- ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+ ret = mlx5_vport_set_other_func_cap(dev, hca_caps, vport->vport,
+ MLX5_SET_HCA_CAP_OP_MOD_IPSEC);
free:
- kvfree(hca_cap);
kvfree(query_cap);
return ret;
}
@@ -171,34 +146,24 @@ static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport
static int esw_ipsec_vf_crypto_aux_caps_set(struct mlx5_core_dev *dev, u16 vport_num, bool enable)
{
int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
- struct mlx5_eswitch *esw = dev->priv.eswitch;
- void *hca_cap, *query_cap, *cap;
+ void *query_cap, *hca_caps;
int ret;
query_cap = kvzalloc(query_sz, GFP_KERNEL);
- hca_cap = kvzalloc(set_sz, GFP_KERNEL);
- if (!hca_cap || !query_cap) {
- ret = -ENOMEM;
- goto free;
- }
+ if (!query_cap)
+ return -ENOMEM;
ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
if (ret)
goto free;
- cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
- memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
- MLX5_UN_SZ_BYTES(hca_cap_union));
- MLX5_SET(per_protocol_networking_offload_caps, cap, insert_trailer, enable);
- MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
- MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
- MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
- MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS << 1);
- ret = mlx5_cmd_exec_in(esw->dev, set_hca_cap, hca_cap);
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ MLX5_SET(per_protocol_networking_offload_caps, hca_caps,
+ insert_trailer, enable);
+
+ ret = mlx5_vport_set_other_func_cap(dev, hca_caps, vport_num,
+ MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS);
free:
- kvfree(hca_cap);
kvfree(query_cap);
return ret;
}
--
2.44.0
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH net-next 8/8] net/mlx5: Generalize enable/disable HCA for any PF vport
2026-05-10 5:34 [PATCH net-next 0/8] net/mlx5: Prepare eswitch infrastructure for satellite PF support Tariq Toukan
` (6 preceding siblings ...)
2026-05-10 5:34 ` [PATCH net-next 7/8] net/mlx5: Use vport helper for IPsec eswitch set caps Tariq Toukan
@ 2026-05-10 5:34 ` Tariq Toukan
7 siblings, 0 replies; 9+ messages in thread
From: Tariq Toukan @ 2026-05-10 5:34 UTC (permalink / raw)
To: Eric Dumazet, Jakub Kicinski, Paolo Abeni, Andrew Lunn,
David S. Miller
Cc: Saeed Mahameed, Leon Romanovsky, Tariq Toukan, Mark Bloch,
Moshe Shemesh, Akiva Goldberger, netdev, linux-rdma, linux-kernel,
Gal Pressman, Dragos Tatulea
From: Moshe Shemesh <moshe@nvidia.com>
Refactor the host-PF-specific mlx5_cmd_host_pf_enable/disable_hca()
into generic mlx5_cmd_pf_enable/disable_hca() that accept a vport
number. The new functions use vhca_id as function_id when supported.
Similarly, refactor the eswitch layer into generic static helpers
mlx5_esw_pf_enable/disable_hca() with thin wrappers for the host PF
case, in preparation for enable_hca on satellite PF vports.
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../net/ethernet/mellanox/mlx5/core/ecpf.c | 24 +++++++++++-----
.../net/ethernet/mellanox/mlx5/core/ecpf.h | 4 +--
.../net/ethernet/mellanox/mlx5/core/eswitch.c | 28 +++++++++++++------
.../ethernet/mellanox/mlx5/core/mlx5_core.h | 2 ++
.../net/ethernet/mellanox/mlx5/core/vport.c | 4 +--
5 files changed, 42 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index 15cb27aea2c9..350c47d3643b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -18,25 +18,35 @@ static bool mlx5_ecpf_esw_admins_host_pf(const struct mlx5_core_dev *dev)
return mlx5_core_is_ecpf_esw_manager(dev);
}
-int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev)
+int mlx5_cmd_pf_enable_hca(struct mlx5_core_dev *dev, u16 vport_num)
{
u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
+ u16 vhca_id;
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
- MLX5_SET(enable_hca_in, in, function_id, 0);
- MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
- return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (mlx5_vport_use_vhca_id_as_func_id(dev, vport_num, &vhca_id)) {
+ MLX5_SET(enable_hca_in, in, function_id, vhca_id);
+ MLX5_SET(enable_hca_in, in, function_id_type, 1);
+ } else {
+ MLX5_SET(enable_hca_in, in, function_id, vport_num);
+ }
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev)
+int mlx5_cmd_pf_disable_hca(struct mlx5_core_dev *dev, u16 vport_num)
{
u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
+ u16 vhca_id;
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
- MLX5_SET(disable_hca_in, in, function_id, 0);
- MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
+ if (mlx5_vport_use_vhca_id_as_func_id(dev, vport_num, &vhca_id)) {
+ MLX5_SET(disable_hca_in, in, function_id, vhca_id);
+ MLX5_SET(disable_hca_in, in, function_id_type, 1);
+ } else {
+ MLX5_SET(disable_hca_in, in, function_id, vport_num);
+ }
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
index 40b6ad76dca6..d9f9a53b019b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
@@ -17,8 +17,8 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev);
int mlx5_ec_init(struct mlx5_core_dev *dev);
void mlx5_ec_cleanup(struct mlx5_core_dev *dev);
-int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev);
-int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_pf_enable_hca(struct mlx5_core_dev *dev, u16 vport_num);
+int mlx5_cmd_pf_disable_hca(struct mlx5_core_dev *dev, u16 vport_num);
#else /* CONFIG_MLX5_ESWITCH */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 9a7de7c9a667..206911817a04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1452,7 +1452,7 @@ static int mlx5_eswitch_load_ec_vf_vports(struct mlx5_eswitch *esw, u16 num_ec_v
return err;
}
-int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev)
+static int mlx5_esw_pf_enable_hca(struct mlx5_core_dev *dev, u16 vport_num)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5_vport *vport;
@@ -1461,15 +1461,15 @@ int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev)
if (!mlx5_core_is_ecpf(dev) || !mlx5_esw_allowed(esw))
return 0;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_HOST_PF);
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return PTR_ERR(vport);
- /* Once vport and representor are ready, take out the external host PF
- * out of initializing state. Enabling HCA clears the iser->initializing
- * bit and host PF driver loading can progress.
+ /* Once vport and representor are ready, take the PF out of
+ * initializing state. Enabling HCA clears the iser->initializing
+ * bit and PF driver loading can progress.
*/
- err = mlx5_cmd_host_pf_enable_hca(dev);
+ err = mlx5_cmd_pf_enable_hca(dev, vport_num);
if (err)
return err;
@@ -1478,7 +1478,7 @@ int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev)
return 0;
}
-int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev)
+static int mlx5_esw_pf_disable_hca(struct mlx5_core_dev *dev, u16 vport_num)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5_vport *vport;
@@ -1487,11 +1487,11 @@ int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev)
if (!mlx5_core_is_ecpf(dev) || !mlx5_esw_allowed(esw))
return 0;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_HOST_PF);
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return PTR_ERR(vport);
- err = mlx5_cmd_host_pf_disable_hca(dev);
+ err = mlx5_cmd_pf_disable_hca(dev, vport_num);
if (err)
return err;
@@ -1500,6 +1500,16 @@ int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev)
return 0;
}
+int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev)
+{
+ return mlx5_esw_pf_enable_hca(dev, MLX5_VPORT_HOST_PF);
+}
+
+int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev)
+{
+ return mlx5_esw_pf_disable_hca(dev, MLX5_VPORT_HOST_PF);
+}
+
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
* whichever are present on the eswitch.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 2eba141bd521..51637e58a48b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -452,6 +452,8 @@ void mlx5_unload_one_light(struct mlx5_core_dev *dev);
void mlx5_query_nic_sw_system_image_guid(struct mlx5_core_dev *mdev, u8 *buf,
u8 *len);
+bool mlx5_vport_use_vhca_id_as_func_id(struct mlx5_core_dev *dev,
+ u16 vport_num, u16 *vhca_id);
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport,
u16 opmod);
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index f8e6b1ab7c5c..e0848f4e88dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1283,8 +1283,8 @@ void mlx5_query_nic_sw_system_image_guid(struct mlx5_core_dev *mdev, u8 *buf,
buf[(*len)++] = MLX5_CAP_GEN_2(mdev, load_balance_id);
}
-static bool mlx5_vport_use_vhca_id_as_func_id(struct mlx5_core_dev *dev,
- u16 vport_num, u16 *vhca_id)
+bool mlx5_vport_use_vhca_id_as_func_id(struct mlx5_core_dev *dev,
+ u16 vport_num, u16 *vhca_id)
{
if (!MLX5_CAP_GEN_2(dev, function_id_type_vhca_id))
return false;
--
2.44.0
^ permalink raw reply related [flat|nested] 9+ messages in thread