From: Simon Horman <horms@kernel.org>
To: Shinas Rasheed <srasheed@marvell.com>
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
	hgani@marvell.com, vimleshk@marvell.com, sedara@marvell.com,
	egallen@redhat.com, mschmidt@redhat.com, pabeni@redhat.com,
	kuba@kernel.org, wizhao@redhat.com, kheib@redhat.com,
	konguyen@redhat.com, Veerasenareddy Burru <vburru@marvell.com>,
	Satananda Burla <sburla@marvell.com>,
	"David S. Miller" <davem@davemloft.net>,
	Eric Dumazet <edumazet@google.com>
Subject: Re: [PATCH net-next v2 4/8] octeon_ep_vf: add Tx/Rx ring resource setup and cleanup
Date: Fri, 5 Jan 2024 10:50:59 +0000
Message-ID: <20240105105059.GS31813@kernel.org>
In-Reply-To: <20231223134000.2906144-5-srasheed@marvell.com>

On Sat, Dec 23, 2023 at 05:39:56AM -0800, Shinas Rasheed wrote:
> Implement Tx/Rx ring resource allocation and cleanup.
> 
> Signed-off-by: Shinas Rasheed <srasheed@marvell.com>

Hi Shinas,

some minor feedback from my side.

...

> diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c

...

> +/**
> + * octep_vf_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
> + *
> + * @oq: Octeon Rx queue data structure.
> + *
> + * Return: 0, if successfully filled receive buffers for all descriptors.
> + *         -1, if failed to allocate a buffer or failed to map for DMA.

I think it is more idiomatic to use well-known error codes
in kernel code. In this case, perhaps -ENOMEM.

Likewise elsewhere in this patch.
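
For example, the error path of this function could end with:

	return -ENOMEM;

rather than

	return -1;

and the kernel-doc "Return:" description above updated to match.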

> + */
> +static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq)
> +{
> +	struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
> +	struct page *page;
> +	u32 i;
> +
> +	for (i = 0; i < oq->max_count; i++) {
> +		page = dev_alloc_page();
> +		if (unlikely(!page)) {
> +			dev_err(oq->dev, "Rx buffer alloc failed\n");
> +			goto rx_buf_alloc_err;
> +		}
> +		desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
> +						       PAGE_SIZE,
> +						       DMA_FROM_DEVICE);
> +		if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
> +			dev_err(oq->dev,
> +				"OQ-%d buffer alloc: DMA mapping error!\n",
> +				oq->q_no);
> +			put_page(page);
> +			goto dma_map_err;

nit: I think put_page() can be moved to the dma_map_err label.
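
Something like this (completely untested), with the put_page() call
above dropped:

dma_map_err:
	/* The page was allocated but could not be mapped */
	put_page(page);
rx_buf_alloc_err:
	while (i) {
		i--;
		dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		put_page(oq->buff_info[i].page);
		oq->buff_info[i].page = NULL;
	}

	return -ENOMEM;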

> +		}
> +		oq->buff_info[i].page = page;
> +	}
> +
> +	return 0;
> +
> +dma_map_err:
> +rx_buf_alloc_err:
> +	while (i) {
> +		i--;
> +		dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
> +		put_page(oq->buff_info[i].page);
> +		oq->buff_info[i].page = NULL;
> +	}
> +
> +	return -1;
> +}
> +
> +/**
> + * octep_vf_setup_oq() - Setup a Rx queue.
> + *
> + * @oct: Octeon device private data structure.
> + * @q_no: Rx queue number to be setup.
> + *
> + * Allocate resources for a Rx queue.
> + */
> +static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
> +{
> +	struct octep_vf_oq *oq;
> +	u32 desc_ring_size;
> +
> +	oq = vzalloc(sizeof(*oq));
> +	if (!oq)
> +		goto create_oq_fail;
> +	oct->oq[q_no] = oq;
> +
> +	oq->octep_vf_dev = oct;
> +	oq->netdev = oct->netdev;
> +	oq->dev = &oct->pdev->dev;
> +	oq->q_no = q_no;
> +	oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
> +	oq->ring_size_mask = oq->max_count - 1;
> +	oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
> +	oq->max_single_buffer_size = oq->buffer_size - OCTEP_VF_OQ_RESP_HW_SIZE;
> +
> +	/* When the hardware/firmware supports additional capabilities,
> +	 * additional header is filled-in by Octeon after length field in
> +	 * Rx packets. this header contains additional packet information.
> +	 */
> +	if (oct->fw_info.rx_ol_flags)
> +		oq->max_single_buffer_size -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
> +
> +	oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
> +
> +	desc_ring_size = oq->max_count * OCTEP_VF_OQ_DESC_SIZE;
> +	oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
> +					   &oq->desc_ring_dma, GFP_KERNEL);
> +
> +	if (unlikely(!oq->desc_ring)) {
> +		dev_err(oq->dev,
> +			"Failed to allocate DMA memory for OQ-%d !!\n", q_no);
> +		goto desc_dma_alloc_err;
> +	}
> +
> +	oq->buff_info = (struct octep_vf_rx_buffer *)
> +			vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE);

nit: there is no need to cast a void pointer.

	oq->buff_info = vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE);

> +	if (unlikely(!oq->buff_info)) {
> +		dev_err(&oct->pdev->dev,
> +			"Failed to allocate buffer info for OQ-%d\n", q_no);
> +		goto buf_list_err;
> +	}
> +
> +	if (octep_vf_oq_fill_ring_buffers(oq))
> +		goto oq_fill_buff_err;
> +
> +	octep_vf_oq_reset_indices(oq);
> +	oct->hw_ops.setup_oq_regs(oct, q_no);
> +	oct->num_oqs++;
> +
> +	return 0;
> +
> +oq_fill_buff_err:
> +	vfree(oq->buff_info);
> +	oq->buff_info = NULL;
> +buf_list_err:
> +	dma_free_coherent(oq->dev, desc_ring_size,
> +			  oq->desc_ring, oq->desc_ring_dma);
> +	oq->desc_ring = NULL;
> +desc_dma_alloc_err:
> +	vfree(oq);
> +	oct->oq[q_no] = NULL;
> +create_oq_fail:
> +	return -1;
> +}

...

> +/**
> + * octep_vf_free_oq() - Free Rx queue resources.
> + *
> + * @oq: Octeon Rx queue data structure.
> + *
> + * Free all resources of a Rx queue.
> + */
> +static int octep_vf_free_oq(struct octep_vf_oq *oq)
> +{
> +	struct octep_vf_device *oct = oq->octep_vf_dev;
> +	int q_no = oq->q_no;
> +
> +	octep_vf_oq_free_ring_buffers(oq);
> +
> +	if (oq->buff_info)
> +		vfree(oq->buff_info);

nit: there is no need to check for NULL as vfree() can handle
     a NULL argument.
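
i.e.

	vfree(oq->buff_info);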

> +
> +	if (oq->desc_ring)
> +		dma_free_coherent(oq->dev,
> +				  oq->max_count * OCTEP_VF_OQ_DESC_SIZE,
> +				  oq->desc_ring, oq->desc_ring_dma);
> +
> +	vfree(oq);
> +	oct->oq[q_no] = NULL;
> +	oct->num_oqs--;
> +	return 0;
> +}

...

> +/**
> + * octep_vf_free_iq() - Free Tx queue resources.
> + *
> + * @iq: Octeon Tx queue data structure.
> + *
> + * Free all the resources allocated for a Tx queue.
> + */
> +static void octep_vf_free_iq(struct octep_vf_iq *iq)
> +{
> +	struct octep_vf_device *oct = iq->octep_vf_dev;
> +	u64 desc_ring_size, sglist_size;
> +	int q_no = iq->q_no;
> +
> +	desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
> +
> +	if (iq->buff_info)
> +		vfree(iq->buff_info);

Ditto.

> +
> +	if (iq->desc_ring)
> +		dma_free_coherent(iq->dev, desc_ring_size,
> +				  iq->desc_ring, iq->desc_ring_dma);
> +
> +	sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
> +		      CFG_GET_IQ_NUM_DESC(oct->conf);
> +	if (iq->sglist)
> +		dma_free_coherent(iq->dev, sglist_size,
> +				  iq->sglist, iq->sglist_dma);
> +
> +	vfree(iq);
> +	oct->iq[q_no] = NULL;
> +	oct->num_iqs--;
>  }

...
