From: Kamal Heib <kheib@redhat.com>
To: Saeed Mahameed <saeed@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
	Eric Dumazet <edumazet@google.com>,
	Saeed Mahameed <saeedm@nvidia.com>,
	netdev@vger.kernel.org, Tariq Toukan <tariqt@nvidia.com>,
	Gal Pressman <gal@nvidia.com>,
	Leon Romanovsky <leonro@nvidia.com>, Jiri Pirko <jiri@nvidia.com>,
	Vlad Dumitrescu <vdumitrescu@nvidia.com>
Subject: Re: [PATCH net-next 04/14] net/mlx5: Implement devlink enable_sriov parameter
Date: Tue, 4 Mar 2025 11:43:38 -0500
Message-ID: <Z8ctuhBNsAcK_ZRH@fedora-x1>
In-Reply-To: <20250228021227.871993-5-saeed@kernel.org>

On Thu, Feb 27, 2025 at 06:12:17PM -0800, Saeed Mahameed wrote:
> From: Vlad Dumitrescu <vdumitrescu@nvidia.com>
> 
> Example usage:
>   devlink dev param set pci/0000:01:00.0 name enable_sriov value {true, false} cmode permanent
>   devlink dev reload pci/0000:01:00.0 action fw_activate
>   echo 1 >/sys/bus/pci/devices/0000:01:00.0/remove
>   echo 1 >/sys/bus/pci/rescan
>   grep ^ /sys/bus/pci/devices/0000:01:00.0/sriov_*
> 
> Signed-off-by: Vlad Dumitrescu <vdumitrescu@nvidia.com>
> Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>

Tested-by: Kamal Heib <kheib@redhat.com>
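
FWIW, the flow from the commit message is what I used to verify this: set
the param with cmode permanent, fw_activate reload, then remove/rescan the
device. The new value can then be confirmed (hypothetical 0000:01:00.0
address, as in the example above) with:

  devlink dev param show pci/0000:01:00.0 name enable_sriov

which should print something like:

  pci/0000:01:00.0:
    name enable_sriov type generic
      values:
        cmode permanent value true

and the sysfs sriov_* attributes from the example follow accordingly.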

> ---
>  Documentation/networking/devlink/mlx5.rst     |  14 +-
>  .../net/ethernet/mellanox/mlx5/core/devlink.c |   1 +
>  .../mellanox/mlx5/core/lib/nv_param.c         | 184 ++++++++++++++++++
>  3 files changed, 196 insertions(+), 3 deletions(-)
> 
> diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
> index 417e5cdcd35d..587e0200c1cd 100644
> --- a/Documentation/networking/devlink/mlx5.rst
> +++ b/Documentation/networking/devlink/mlx5.rst
> @@ -15,23 +15,31 @@ Parameters
>     * - Name
>       - Mode
>       - Validation
> +     - Notes
>     * - ``enable_roce``
>       - driverinit
> -     - Type: Boolean
> -
> -       If the device supports RoCE disablement, RoCE enablement state controls
> +     - Boolean
> +     - If the device supports RoCE disablement, RoCE enablement state controls
>         device support for RoCE capability. Otherwise, the control occurs in the
>         driver stack. When RoCE is disabled at the driver level, only raw
>         ethernet QPs are supported.
>     * - ``io_eq_size``
>       - driverinit
>       - The range is between 64 and 4096.
> +     -
>     * - ``event_eq_size``
>       - driverinit
>       - The range is between 64 and 4096.
> +     -
>     * - ``max_macs``
>       - driverinit
>       - The range is between 1 and 2^31. Only power of 2 values are supported.
> +     -
> +   * - ``enable_sriov``
> +     - permanent
> +     - Boolean
> +     - Applies to each physical function (PF) independently, if the device
> +       supports it. Otherwise, it applies symmetrically to all PFs.
>  
>  The ``mlx5`` driver also implements the following driver-specific
>  parameters.
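
One small doc suggestion: since the new entry says the parameter applies to
each PF independently when the device supports it, it might be worth
showing that each PF is then set through its own devlink instance, e.g.
(hypothetical addresses):

  devlink dev param set pci/0000:01:00.0 name enable_sriov value true cmode permanent
  devlink dev param set pci/0000:01:00.1 name enable_sriov value true cmode permanent

Not a blocker, just a readability suggestion for the table.
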
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> index 1f764ae4f4aa..7a702d84f19a 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> @@ -8,5 +8,6 @@
>  #include "fs_core.h"
>  #include "eswitch.h"
>  #include "esw/qos.h"
> +#include "lib/nv_param.h"
>  #include "sf/dev/dev.h"
>  #include "sf/sf.h"
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
> index 5ab37a88c260..6b63fc110e2d 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
> @@ -5,7 +5,11 @@
>  #include "mlx5_core.h"
>  
>  enum {
> +	MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF               = 0x80,
> +	MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP                = 0x81,
>  	MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG             = 0x10a,
> +
> +	MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF                   = 0x80,
>  };
>  
>  struct mlx5_ifc_configuration_item_type_class_global_bits {
> @@ -13,9 +17,18 @@ struct mlx5_ifc_configuration_item_type_class_global_bits {
>  	u8         parameter_index[0x18];
>  };
>  
> +struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits {
> +	u8         type_class[0x8];
> +	u8         pf_index[0x6];
> +	u8         pci_bus_index[0x8];
> +	u8         parameter_index[0xa];
> +};
> +
>  union mlx5_ifc_config_item_type_auto_bits {
>  	struct mlx5_ifc_configuration_item_type_class_global_bits
>  				configuration_item_type_class_global;
> +	struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits
> +				configuration_item_type_class_per_host_pf;
>  	u8 reserved_at_0[0x20];
>  };
>  
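
A minor observation on the per-host-PF item type: unlike the global class,
parameter_index here is only 10 bits wide (0xa), with pf_index and
pci_bus_index occupying the rest of the dword, so class 3 parameter IDs
live in a much smaller namespace. Might deserve a short comment in the
header if more class 3 items get added later.
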
> @@ -45,6 +58,45 @@ struct mlx5_ifc_mnvda_reg_bits {
>  	u8         configuration_item_data[64][0x20];
>  };
>  
> +struct mlx5_ifc_nv_global_pci_conf_bits {
> +	u8         sriov_valid[0x1];
> +	u8         reserved_at_1[0x10];
> +	u8         per_pf_total_vf[0x1];
> +	u8         reserved_at_12[0xe];
> +
> +	u8         sriov_en[0x1];
> +	u8         reserved_at_21[0xf];
> +	u8         total_vfs[0x10];
> +
> +	u8         reserved_at_40[0x20];
> +};
> +
> +struct mlx5_ifc_nv_global_pci_cap_bits {
> +	u8         max_vfs_per_pf_valid[0x1];
> +	u8         reserved_at_1[0x13];
> +	u8         per_pf_total_vf_supported[0x1];
> +	u8         reserved_at_15[0xb];
> +
> +	u8         sriov_support[0x1];
> +	u8         reserved_at_21[0xf];
> +	u8         max_vfs_per_pf[0x10];
> +
> +	u8         reserved_at_40[0x60];
> +};
> +
> +struct mlx5_ifc_nv_pf_pci_conf_bits {
> +	u8         reserved_at_0[0x9];
> +	u8         pf_total_vf_en[0x1];
> +	u8         reserved_at_a[0x16];
> +
> +	u8         reserved_at_20[0x20];
> +
> +	u8         reserved_at_40[0x10];
> +	u8         total_vf[0x10];
> +
> +	u8         reserved_at_60[0x20];
> +};
> +
>  struct mlx5_ifc_nv_sw_offload_conf_bits {
>  	u8         ip_over_vxlan_port[0x10];
>  	u8         tunnel_ecn_copy_offload_disable[0x1];
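
Nice split between the _cap and _conf layouts here, by the way: the
capability item is only ever read below (sriov_support,
per_pf_total_vf_supported), while the conf items carry the writable state
(sriov_en, total_vfs, pf_total_vf_en), which keeps the get/set paths easy
to follow.
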
> @@ -206,7 +258,139 @@ static int mlx5_nv_param_devlink_cqe_compress_set(struct devlink *devlink, u32 i
>  	return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
>  }
>  
> +static int
> +mlx5_nv_param_read_global_pci_conf(struct mlx5_core_dev *dev, void *mnvda, size_t len)
> +{
> +	MLX5_SET_CONFIG_ITEM_TYPE(global, mnvda, type_class, 0);
> +	MLX5_SET_CONFIG_ITEM_TYPE(global, mnvda, parameter_index,
> +				  MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF);
> +	MLX5_SET_CONFIG_HDR_LEN(mnvda, nv_global_pci_conf);
> +
> +	return mlx5_nv_param_read(dev, mnvda, len);
> +}
> +
> +static int
> +mlx5_nv_param_read_global_pci_cap(struct mlx5_core_dev *dev, void *mnvda, size_t len)
> +{
> +	MLX5_SET_CONFIG_ITEM_TYPE(global, mnvda, type_class, 0);
> +	MLX5_SET_CONFIG_ITEM_TYPE(global, mnvda, parameter_index,
> +				  MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP);
> +	MLX5_SET_CONFIG_HDR_LEN(mnvda, nv_global_pci_cap);
> +
> +	return mlx5_nv_param_read(dev, mnvda, len);
> +}
> +
> +static int
> +mlx5_nv_param_read_per_host_pf_conf(struct mlx5_core_dev *dev, void *mnvda, size_t len)
> +{
> +	MLX5_SET_CONFIG_ITEM_TYPE(per_host_pf, mnvda, type_class, 3);
> +	MLX5_SET_CONFIG_ITEM_TYPE(per_host_pf, mnvda, parameter_index,
> +				  MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF);
> +	MLX5_SET_CONFIG_HDR_LEN(mnvda, nv_pf_pci_conf);
> +
> +	return mlx5_nv_param_read(dev, mnvda, len);
> +}
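
These three helpers share the same shape: fill in the MNVDA header
(type_class 0 for the global items, 3 for the per-host-PF one, plus the
parameter index and payload length), then issue the read. Fine as is for
three users; if more classes appear, a single parameterized helper might
be worth it.
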
> +
> +static int mlx5_devlink_enable_sriov_get(struct devlink *devlink, u32 id,
> +					 struct devlink_param_gset_ctx *ctx)
> +{
> +	struct mlx5_core_dev *dev = devlink_priv(devlink);
> +	u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
> +	void *data;
> +	int err;
> +
> +	err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
> +	if (err)
> +		return err;
> +
> +	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
> +	if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
> +		ctx->val.vbool = false;
> +		return 0;
> +	}
> +
> +	memset(mnvda, 0, sizeof(mnvda));
> +	err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
> +	if (err)
> +		return err;
> +
> +	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
> +	if (!MLX5_GET(nv_global_pci_conf, data, per_pf_total_vf)) {
> +		ctx->val.vbool = MLX5_GET(nv_global_pci_conf, data, sriov_en);
> +		return 0;
> +	}
> +
> +	/* SRIOV is per PF */
> +	memset(mnvda, 0, sizeof(mnvda));
> +	err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
> +	if (err)
> +		return err;
> +
> +	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
> +	ctx->val.vbool = MLX5_GET(nv_pf_pci_conf, data, pf_total_vf_en);
> +	return 0;
> +}
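
If I read the fallback chain right, the reported value is:

  sriov_support == 0    -> false
  per_pf_total_vf == 0  -> global sriov_en
  otherwise             -> this PF's pf_total_vf_en

which matches the documentation text added above.
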
> +
> +static int mlx5_devlink_enable_sriov_set(struct devlink *devlink, u32 id,
> +					 struct devlink_param_gset_ctx *ctx,
> +					 struct netlink_ext_ack *extack)
> +{
> +	struct mlx5_core_dev *dev = devlink_priv(devlink);
> +	u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
> +	bool per_pf_support;
> +	void *cap, *data;
> +	int err;
> +
> +	err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
> +	if (err) {
> +		NL_SET_ERR_MSG_MOD(extack, "Failed to read global PCI capability");
> +		return err;
> +	}
> +
> +	cap = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
> +	per_pf_support = MLX5_GET(nv_global_pci_cap, cap, per_pf_total_vf_supported);
> +
> +	if (!MLX5_GET(nv_global_pci_cap, cap, sriov_support)) {
> +		NL_SET_ERR_MSG_MOD(extack, "Not configurable on this device");
> +		return -EOPNOTSUPP;
> +	}
> +
> +	memset(mnvda, 0, sizeof(mnvda));
> +	err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
> +	if (err) {
> +		NL_SET_ERR_MSG_MOD(extack, "Unable to read global PCI configuration");
> +		return err;
> +	}
> +
> +	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
> +	MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
> +	MLX5_SET(nv_global_pci_conf, data, sriov_en, ctx->val.vbool);
> +	MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, per_pf_support);
> +
> +	err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
> +	if (err) {
> +		NL_SET_ERR_MSG_MOD(extack, "Unable to write global PCI configuration");
> +		return err;
> +	}
> +
> +	if (!per_pf_support)
> +		return 0;
> +
> +	/* SRIOV is per PF */
> +	memset(mnvda, 0, sizeof(mnvda));
> +	err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
> +	if (err) {
> +		NL_SET_ERR_MSG_MOD(extack, "Unable to read per host PF configuration");
> +		return err;
> +	}
> +	MLX5_SET(nv_pf_pci_conf, data, pf_total_vf_en, ctx->val.vbool);
> +	return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
> +}
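
One readability nit on the tail end: after the per-host-PF read, 'data'
still points at configuration_item_data inside mnvda, so reusing it across
the memset() and re-read is correct, but re-fetching it would make that
less subtle, e.g.:

	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
	MLX5_SET(nv_pf_pci_conf, data, pf_total_vf_en, ctx->val.vbool);

Either way works, so feel free to ignore.
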
> +
>  static const struct devlink_param mlx5_nv_param_devlink_params[] = {
> +	DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT),
> +			      mlx5_devlink_enable_sriov_get,
> +			      mlx5_devlink_enable_sriov_set, NULL),
>  	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE,
>  			     "cqe_compress_type", DEVLINK_PARAM_TYPE_STRING,
>  			     BIT(DEVLINK_PARAM_CMODE_PERMANENT),
> -- 
> 2.48.1