From: Martin Habets <habetsm.xilinx@gmail.com>
To: Gautam Dawar <gautam.dawar@amd.com>
Cc: linux-net-drivers@amd.com, jasowang@redhat.com,
	Edward Cree <ecree.xilinx@gmail.com>,
	"David S. Miller" <davem@davemloft.net>,
	Eric Dumazet <edumazet@google.com>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
	Richard Cochran <richardcochran@gmail.com>,
	linux-kernel@vger.kernel.org, netdev@vger.kernel.org,
	eperezma@redhat.com, harpreet.anand@amd.com, tanuj.kamde@amd.com,
	koushik.dutta@amd.com
Subject: Re: [PATCH net-next v2 08/14] sfc: implement vdpa vring config operations
Date: Wed, 8 Mar 2023 17:06:06 +0000	[thread overview]
Message-ID: <ZAjAfoWwgVxsndgD@gmail.com> (raw)
In-Reply-To: <20230307113621.64153-9-gautam.dawar@amd.com>

On Tue, Mar 07, 2023 at 05:06:10PM +0530, Gautam Dawar wrote:
> This patch implements the vDPA config operations related to
> virtqueues or vrings. These include setting vring address,
> getting vq state, operations to enable/disable a vq etc.
> The resources required for vring operations eg. VI, interrupts etc.
> are also allocated.
> 
> Signed-off-by: Gautam Dawar <gautam.dawar@amd.com>
> ---
>  drivers/net/ethernet/sfc/ef100_vdpa.c     |  46 +++-
>  drivers/net/ethernet/sfc/ef100_vdpa.h     |  54 +++++
>  drivers/net/ethernet/sfc/ef100_vdpa_ops.c | 275 ++++++++++++++++++++++
>  3 files changed, 374 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/ethernet/sfc/ef100_vdpa.c b/drivers/net/ethernet/sfc/ef100_vdpa.c
> index 4c5a98c9d6c3..c66e5aef69ea 100644
> --- a/drivers/net/ethernet/sfc/ef100_vdpa.c
> +++ b/drivers/net/ethernet/sfc/ef100_vdpa.c
> @@ -14,6 +14,7 @@
>  #include "ef100_vdpa.h"
>  #include "mcdi_vdpa.h"
>  #include "mcdi_filters.h"
> +#include "mcdi_functions.h"
>  #include "ef100_netdev.h"
>  
>  static struct virtio_device_id ef100_vdpa_id_table[] = {
> @@ -47,12 +48,31 @@ int ef100_vdpa_init(struct efx_probe_data *probe_data)
>  	return rc;
>  }
>  
> +static int vdpa_allocate_vis(struct efx_nic *efx, unsigned int *allocated_vis)
> +{
> +	/* The first VI is reserved for MCDI
> +	 * 1 VI each for rx + tx ring
> +	 */
> +	unsigned int max_vis = 1 + EF100_VDPA_MAX_QUEUES_PAIRS;
> +	unsigned int min_vis = 1 + 1;
> +	int rc;
> +
> +	rc = efx_mcdi_alloc_vis(efx, min_vis, max_vis,
> +				NULL, allocated_vis);
> +	if (!rc)
> +		return rc;
> +	if (*allocated_vis < min_vis)
> +		return -ENOSPC;
> +	return 0;
> +}
> +
>  static void ef100_vdpa_delete(struct efx_nic *efx)
>  {
>  	if (efx->vdpa_nic) {
>  		/* replace with _vdpa_unregister_device later */
>  		put_device(&efx->vdpa_nic->vdpa_dev.dev);
>  	}
> +	efx_mcdi_free_vis(efx);
>  }
>  
>  void ef100_vdpa_fini(struct efx_probe_data *probe_data)
> @@ -104,9 +124,19 @@ static struct ef100_vdpa_nic *ef100_vdpa_create(struct efx_nic *efx,
>  {
>  	struct ef100_nic_data *nic_data = efx->nic_data;
>  	struct ef100_vdpa_nic *vdpa_nic;
> +	unsigned int allocated_vis;
>  	int rc;
> +	u8 i;
>  
>  	nic_data->vdpa_class = dev_type;
> +	rc = vdpa_allocate_vis(efx, &allocated_vis);
> +	if (rc) {
> +		pci_err(efx->pci_dev,
> +			"%s Alloc VIs failed for vf:%u error:%d\n",
> +			 __func__, nic_data->vf_index, rc);
> +		return ERR_PTR(rc);
> +	}
> +
>  	vdpa_nic = vdpa_alloc_device(struct ef100_vdpa_nic,
>  				     vdpa_dev, &efx->pci_dev->dev,
>  				     &ef100_vdpa_config_ops,
> @@ -117,7 +147,8 @@ static struct ef100_vdpa_nic *ef100_vdpa_create(struct efx_nic *efx,
>  			"vDPA device allocation failed for vf: %u\n",
>  			nic_data->vf_index);
>  		nic_data->vdpa_class = EF100_VDPA_CLASS_NONE;
> -		return ERR_PTR(-ENOMEM);
> +		rc = -ENOMEM;
> +		goto err_alloc_vis_free;
>  	}
>  
>  	mutex_init(&vdpa_nic->lock);
> @@ -125,11 +156,21 @@ static struct ef100_vdpa_nic *ef100_vdpa_create(struct efx_nic *efx,
>  	vdpa_nic->vdpa_dev.dma_dev = &efx->pci_dev->dev;
>  	vdpa_nic->vdpa_dev.mdev = efx->mgmt_dev;
>  	vdpa_nic->efx = efx;
> +	vdpa_nic->max_queue_pairs = allocated_vis - 1;
>  	vdpa_nic->pf_index = nic_data->pf_index;
>  	vdpa_nic->vf_index = nic_data->vf_index;
>  	vdpa_nic->vdpa_state = EF100_VDPA_STATE_INITIALIZED;
>  	vdpa_nic->mac_address = (u8 *)&vdpa_nic->net_config.mac;
>  
> +	for (i = 0; i < (2 * vdpa_nic->max_queue_pairs); i++) {
> +		rc = ef100_vdpa_init_vring(vdpa_nic, i);
> +		if (rc) {
> +			pci_err(efx->pci_dev,
> +				"vring init idx: %u failed, rc: %d\n", i, rc);
> +			goto err_put_device;
> +		}
> +	}
> +
>  	rc = get_net_config(vdpa_nic);
>  	if (rc)
>  		goto err_put_device;
> @@ -146,6 +187,9 @@ static struct ef100_vdpa_nic *ef100_vdpa_create(struct efx_nic *efx,
>  err_put_device:
>  	/* put_device invokes ef100_vdpa_free */
>  	put_device(&vdpa_nic->vdpa_dev.dev);
> +
> +err_alloc_vis_free:
> +	efx_mcdi_free_vis(efx);
>  	return ERR_PTR(rc);
>  }
>  
> diff --git a/drivers/net/ethernet/sfc/ef100_vdpa.h b/drivers/net/ethernet/sfc/ef100_vdpa.h
> index dcf4a8156415..348ca8a7404b 100644
> --- a/drivers/net/ethernet/sfc/ef100_vdpa.h
> +++ b/drivers/net/ethernet/sfc/ef100_vdpa.h
> @@ -32,6 +32,21 @@
>  /* Alignment requirement of the Virtqueue */
>  #define EF100_VDPA_VQ_ALIGN 4096
>  
> +/* Vring configuration definitions */
> +#define EF100_VRING_ADDRESS_CONFIGURED 0x1
> +#define EF100_VRING_SIZE_CONFIGURED 0x10
> +#define EF100_VRING_READY_CONFIGURED 0x100
> +#define EF100_VRING_CONFIGURED (EF100_VRING_ADDRESS_CONFIGURED | \
> +				EF100_VRING_SIZE_CONFIGURED | \
> +				EF100_VRING_READY_CONFIGURED)
> +#define EF100_VRING_CREATED 0x1000

I only see these defines used as bit masks, so why skip bits instead of
using 0x2, 0x4 and 0x8 respectively?
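
For illustration only, a sketch of the contiguous-bit layout I have in
mind (not part of the patch; it assumes the kernel's BIT() macro from
<linux/bits.h>):

#include <linux/bits.h>

/* One bit per vring configuration step, lowest bits first. */
#define EF100_VRING_ADDRESS_CONFIGURED	BIT(0)
#define EF100_VRING_SIZE_CONFIGURED	BIT(1)
#define EF100_VRING_READY_CONFIGURED	BIT(2)
/* Composite mask keeps the same meaning as in the patch. */
#define EF100_VRING_CONFIGURED		(EF100_VRING_ADDRESS_CONFIGURED | \
					 EF100_VRING_SIZE_CONFIGURED | \
					 EF100_VRING_READY_CONFIGURED)
#define EF100_VRING_CREATED		BIT(3)

Tests such as "vring_state & EF100_VRING_CREATED" in ef100_vdpa_kick_vq()
would work unchanged with this layout.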

Martin

> +
> +/* Maximum size of msix name */
> +#define EF100_VDPA_MAX_MSIX_NAME_SIZE 256
> +
> +/* Default high IOVA for MCDI buffer */
> +#define EF100_VDPA_IOVA_BASE_ADDR 0x20000000000
> +
>  /**
>   * enum ef100_vdpa_nic_state - possible states for a vDPA NIC
>   *
> @@ -57,6 +72,41 @@ enum ef100_vdpa_vq_type {
>  	EF100_VDPA_VQ_NTYPES
>  };
>  
> +/**
> + * struct ef100_vdpa_vring_info - vDPA vring data structure
> + *
> + * @desc: Descriptor area address of the vring
> + * @avail: Available area address of the vring
> + * @used: Device area address of the vring
> + * @size: Number of entries in the vring
> + * @vring_state: bit map to track vring configuration
> + * @last_avail_idx: last available index of the vring
> + * @last_used_idx: last used index of the vring
> + * @doorbell_offset: doorbell offset
> + * @doorbell_offset_valid: true if @doorbell_offset is updated
> + * @vring_type: type of vring created
> + * @vring_ctx: vring context information
> + * @msix_name: device name for vring irq handler
> + * @irq: irq number for vring irq handler
> + * @cb: callback for vring interrupts
> + */
> +struct ef100_vdpa_vring_info {
> +	dma_addr_t desc;
> +	dma_addr_t avail;
> +	dma_addr_t used;
> +	u32 size;
> +	u16 vring_state;
> +	u32 last_avail_idx;
> +	u32 last_used_idx;
> +	u32 doorbell_offset;
> +	bool doorbell_offset_valid;
> +	enum ef100_vdpa_vq_type vring_type;
> +	struct efx_vring_ctx *vring_ctx;
> +	char msix_name[EF100_VDPA_MAX_MSIX_NAME_SIZE];
> +	u32 irq;
> +	struct vdpa_callback cb;
> +};
> +
>  /**
>   *  struct ef100_vdpa_nic - vDPA NIC data structure
>   *
> @@ -70,6 +120,7 @@ enum ef100_vdpa_vq_type {
>   * @features: negotiated feature bits
>   * @max_queue_pairs: maximum number of queue pairs supported
>   * @net_config: virtio_net_config data
> + * @vring: vring information of the vDPA device.
>   * @mac_address: mac address of interface associated with this vdpa device
>   * @mac_configured: true after MAC address is configured
>   * @cfg_cb: callback for config change
> @@ -86,6 +137,7 @@ struct ef100_vdpa_nic {
>  	u64 features;
>  	u32 max_queue_pairs;
>  	struct virtio_net_config net_config;
> +	struct ef100_vdpa_vring_info vring[EF100_VDPA_MAX_QUEUES_PAIRS * 2];
>  	u8 *mac_address;
>  	bool mac_configured;
>  	struct vdpa_callback cfg_cb;
> @@ -95,6 +147,8 @@ int ef100_vdpa_init(struct efx_probe_data *probe_data);
>  void ef100_vdpa_fini(struct efx_probe_data *probe_data);
>  int ef100_vdpa_register_mgmtdev(struct efx_nic *efx);
>  void ef100_vdpa_unregister_mgmtdev(struct efx_nic *efx);
> +void ef100_vdpa_irq_vectors_free(void *data);
> +int ef100_vdpa_init_vring(struct ef100_vdpa_nic *vdpa_nic, u16 idx);
>  
>  static inline bool efx_vdpa_is_little_endian(struct ef100_vdpa_nic *vdpa_nic)
>  {
> diff --git a/drivers/net/ethernet/sfc/ef100_vdpa_ops.c b/drivers/net/ethernet/sfc/ef100_vdpa_ops.c
> index a2364ef9f492..0051c4c0e47c 100644
> --- a/drivers/net/ethernet/sfc/ef100_vdpa_ops.c
> +++ b/drivers/net/ethernet/sfc/ef100_vdpa_ops.c
> @@ -9,13 +9,270 @@
>  
>  #include <linux/vdpa.h>
>  #include "ef100_vdpa.h"
> +#include "io.h"
>  #include "mcdi_vdpa.h"
>  
> +/* Get the queue's function-local index of the associated VI
> + * virtqueue number queue 0 is reserved for MCDI
> + */
> +#define EFX_GET_VI_INDEX(vq_num) (((vq_num) / 2) + 1)
> +
>  static struct ef100_vdpa_nic *get_vdpa_nic(struct vdpa_device *vdev)
>  {
>  	return container_of(vdev, struct ef100_vdpa_nic, vdpa_dev);
>  }
>  
> +void ef100_vdpa_irq_vectors_free(void *data)
> +{
> +	pci_free_irq_vectors(data);
> +}
> +
> +static int create_vring_ctx(struct ef100_vdpa_nic *vdpa_nic, u16 idx)
> +{
> +	struct efx_vring_ctx *vring_ctx;
> +	u32 vi_index;
> +
> +	if (idx % 2) /* Even VQ for RX and odd for TX */
> +		vdpa_nic->vring[idx].vring_type = EF100_VDPA_VQ_TYPE_NET_TXQ;
> +	else
> +		vdpa_nic->vring[idx].vring_type = EF100_VDPA_VQ_TYPE_NET_RXQ;
> +	vi_index = EFX_GET_VI_INDEX(idx);
> +	vring_ctx = efx_vdpa_vring_init(vdpa_nic->efx, vi_index,
> +					vdpa_nic->vring[idx].vring_type);
> +	if (IS_ERR(vring_ctx))
> +		return PTR_ERR(vring_ctx);
> +
> +	vdpa_nic->vring[idx].vring_ctx = vring_ctx;
> +	return 0;
> +}
> +
> +static void delete_vring_ctx(struct ef100_vdpa_nic *vdpa_nic, u16 idx)
> +{
> +	efx_vdpa_vring_fini(vdpa_nic->vring[idx].vring_ctx);
> +	vdpa_nic->vring[idx].vring_ctx = NULL;
> +}
> +
> +static void reset_vring(struct ef100_vdpa_nic *vdpa_nic, u16 idx)
> +{
> +	vdpa_nic->vring[idx].vring_type = EF100_VDPA_VQ_NTYPES;
> +	vdpa_nic->vring[idx].vring_state = 0;
> +	vdpa_nic->vring[idx].last_avail_idx = 0;
> +	vdpa_nic->vring[idx].last_used_idx = 0;
> +}
> +
> +int ef100_vdpa_init_vring(struct ef100_vdpa_nic *vdpa_nic, u16 idx)
> +{
> +	u32 offset;
> +	int rc;
> +
> +	vdpa_nic->vring[idx].irq = -EINVAL;
> +	rc = create_vring_ctx(vdpa_nic, idx);
> +	if (rc) {
> +		dev_err(&vdpa_nic->vdpa_dev.dev,
> +			"%s: create_vring_ctx failed, idx:%u, err:%d\n",
> +			__func__, idx, rc);
> +		return rc;
> +	}
> +
> +	rc = efx_vdpa_get_doorbell_offset(vdpa_nic->vring[idx].vring_ctx,
> +					  &offset);
> +	if (rc) {
> +		dev_err(&vdpa_nic->vdpa_dev.dev,
> +			"%s: get_doorbell failed idx:%u, err:%d\n",
> +			__func__, idx, rc);
> +		goto err_get_doorbell_offset;
> +	}
> +	vdpa_nic->vring[idx].doorbell_offset = offset;
> +	vdpa_nic->vring[idx].doorbell_offset_valid = true;
> +
> +	return 0;
> +
> +err_get_doorbell_offset:
> +	delete_vring_ctx(vdpa_nic, idx);
> +	return rc;
> +}
> +
> +static bool is_qid_invalid(struct ef100_vdpa_nic *vdpa_nic, u16 idx,
> +			   const char *caller)
> +{
> +	if (unlikely(idx >= (vdpa_nic->max_queue_pairs * 2))) {
> +		dev_err(&vdpa_nic->vdpa_dev.dev,
> +			"%s: Invalid qid %u\n", caller, idx);
> +		return true;
> +	}
> +	return false;
> +}
> +
> +static int ef100_vdpa_set_vq_address(struct vdpa_device *vdev,
> +				     u16 idx, u64 desc_area, u64 driver_area,
> +				     u64 device_area)
> +{
> +	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
> +
> +	if (is_qid_invalid(vdpa_nic, idx, __func__))
> +		return -EINVAL;
> +
> +	mutex_lock(&vdpa_nic->lock);
> +	vdpa_nic->vring[idx].desc = desc_area;
> +	vdpa_nic->vring[idx].avail = driver_area;
> +	vdpa_nic->vring[idx].used = device_area;
> +	vdpa_nic->vring[idx].vring_state |= EF100_VRING_ADDRESS_CONFIGURED;
> +	mutex_unlock(&vdpa_nic->lock);
> +	return 0;
> +}
> +
> +static void ef100_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
> +{
> +	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
> +
> +	if (is_qid_invalid(vdpa_nic, idx, __func__))
> +		return;
> +
> +	if (!is_power_of_2(num)) {
> +		dev_err(&vdev->dev, "%s: Index:%u size:%u not power of 2\n",
> +			__func__, idx, num);
> +		return;
> +	}
> +	if (num > EF100_VDPA_VQ_NUM_MAX_SIZE) {
> +		dev_err(&vdev->dev, "%s: Index:%u size:%u more than max:%u\n",
> +			__func__, idx, num, EF100_VDPA_VQ_NUM_MAX_SIZE);
> +		return;
> +	}
> +	mutex_lock(&vdpa_nic->lock);
> +	vdpa_nic->vring[idx].size  = num;
> +	vdpa_nic->vring[idx].vring_state |= EF100_VRING_SIZE_CONFIGURED;
> +	mutex_unlock(&vdpa_nic->lock);
> +}
> +
> +static void ef100_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
> +{
> +	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
> +	u32 idx_val;
> +
> +	if (is_qid_invalid(vdpa_nic, idx, __func__))
> +		return;
> +
> +	if (!(vdpa_nic->vring[idx].vring_state & EF100_VRING_CREATED))
> +		return;
> +
> +	idx_val = idx;
> +	_efx_writed(vdpa_nic->efx, cpu_to_le32(idx_val),
> +		    vdpa_nic->vring[idx].doorbell_offset);
> +}
> +
> +static void ef100_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx,
> +				 struct vdpa_callback *cb)
> +{
> +	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
> +
> +	if (is_qid_invalid(vdpa_nic, idx, __func__))
> +		return;
> +
> +	if (cb)
> +		vdpa_nic->vring[idx].cb = *cb;
> +}
> +
> +static void ef100_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx,
> +				    bool ready)
> +{
> +	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
> +
> +	if (is_qid_invalid(vdpa_nic, idx, __func__))
> +		return;
> +
> +	mutex_lock(&vdpa_nic->lock);
> +	if (ready) {
> +		vdpa_nic->vring[idx].vring_state |=
> +					EF100_VRING_READY_CONFIGURED;
> +	} else {
> +		vdpa_nic->vring[idx].vring_state &=
> +					~EF100_VRING_READY_CONFIGURED;
> +	}
> +	mutex_unlock(&vdpa_nic->lock);
> +}
> +
> +static bool ef100_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
> +{
> +	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
> +	bool ready;
> +
> +	if (is_qid_invalid(vdpa_nic, idx, __func__))
> +		return false;
> +
> +	mutex_lock(&vdpa_nic->lock);
> +	ready = vdpa_nic->vring[idx].vring_state & EF100_VRING_READY_CONFIGURED;
> +	mutex_unlock(&vdpa_nic->lock);
> +	return ready;
> +}
> +
> +static int ef100_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
> +				   const struct vdpa_vq_state *state)
> +{
> +	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
> +
> +	if (is_qid_invalid(vdpa_nic, idx, __func__))
> +		return -EINVAL;
> +
> +	mutex_lock(&vdpa_nic->lock);
> +	vdpa_nic->vring[idx].last_avail_idx = state->split.avail_index;
> +	vdpa_nic->vring[idx].last_used_idx = state->split.avail_index;
> +	mutex_unlock(&vdpa_nic->lock);
> +	return 0;
> +}
> +
> +static int ef100_vdpa_get_vq_state(struct vdpa_device *vdev,
> +				   u16 idx, struct vdpa_vq_state *state)
> +{
> +	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
> +
> +	if (is_qid_invalid(vdpa_nic, idx, __func__))
> +		return -EINVAL;
> +
> +	mutex_lock(&vdpa_nic->lock);
> +	state->split.avail_index = (u16)vdpa_nic->vring[idx].last_used_idx;
> +	mutex_unlock(&vdpa_nic->lock);
> +
> +	return 0;
> +}
> +
> +static struct vdpa_notification_area
> +		ef100_vdpa_get_vq_notification(struct vdpa_device *vdev,
> +					       u16 idx)
> +{
> +	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
> +	struct vdpa_notification_area notify_area = {0, 0};
> +
> +	if (is_qid_invalid(vdpa_nic, idx, __func__))
> +		goto end;
> +
> +	mutex_lock(&vdpa_nic->lock);
> +	notify_area.addr = (uintptr_t)(vdpa_nic->efx->membase_phys +
> +				vdpa_nic->vring[idx].doorbell_offset);
> +	/* VDPA doorbells are at a stride of VI/2
> +	 * One VI stride is shared by both rx & tx doorbells
> +	 */
> +	notify_area.size = vdpa_nic->efx->vi_stride / 2;
> +	mutex_unlock(&vdpa_nic->lock);
> +
> +end:
> +	return notify_area;
> +}
> +
> +static int ef100_get_vq_irq(struct vdpa_device *vdev, u16 idx)
> +{
> +	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
> +	u32 irq;
> +
> +	if (is_qid_invalid(vdpa_nic, idx, __func__))
> +		return -EINVAL;
> +
> +	mutex_lock(&vdpa_nic->lock);
> +	irq = vdpa_nic->vring[idx].irq;
> +	mutex_unlock(&vdpa_nic->lock);
> +
> +	return irq;
> +}
> +
>  static u32 ef100_vdpa_get_vq_align(struct vdpa_device *vdev)
>  {
>  	return EF100_VDPA_VQ_ALIGN;
> @@ -80,6 +337,8 @@ static void ef100_vdpa_set_config_cb(struct vdpa_device *vdev,
>  
>  	if (cb)
>  		vdpa_nic->cfg_cb = *cb;
> +	else
> +		memset(&vdpa_nic->cfg_cb, 0, sizeof(vdpa_nic->cfg_cb));
>  }
>  
>  static u16 ef100_vdpa_get_vq_num_max(struct vdpa_device *vdev)
> @@ -137,14 +396,30 @@ static void ef100_vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
>  static void ef100_vdpa_free(struct vdpa_device *vdev)
>  {
>  	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
> +	int i;
>  
>  	if (vdpa_nic) {
> +		for (i = 0; i < (vdpa_nic->max_queue_pairs * 2); i++) {
> +			reset_vring(vdpa_nic, i);
> +			if (vdpa_nic->vring[i].vring_ctx)
> +				delete_vring_ctx(vdpa_nic, i);
> +		}
>  		mutex_destroy(&vdpa_nic->lock);
>  		vdpa_nic->efx->vdpa_nic = NULL;
>  	}
>  }
>  
>  const struct vdpa_config_ops ef100_vdpa_config_ops = {
> +	.set_vq_address	     = ef100_vdpa_set_vq_address,
> +	.set_vq_num	     = ef100_vdpa_set_vq_num,
> +	.kick_vq	     = ef100_vdpa_kick_vq,
> +	.set_vq_cb	     = ef100_vdpa_set_vq_cb,
> +	.set_vq_ready	     = ef100_vdpa_set_vq_ready,
> +	.get_vq_ready	     = ef100_vdpa_get_vq_ready,
> +	.set_vq_state	     = ef100_vdpa_set_vq_state,
> +	.get_vq_state	     = ef100_vdpa_get_vq_state,
> +	.get_vq_notification = ef100_vdpa_get_vq_notification,
> +	.get_vq_irq          = ef100_get_vq_irq,
>  	.get_vq_align	     = ef100_vdpa_get_vq_align,
>  	.get_device_features = ef100_vdpa_get_device_features,
>  	.set_driver_features = ef100_vdpa_set_driver_features,
> -- 
> 2.30.1
