From: Frank Li <Frank.li@nxp.com>
To: Wei Fang <wei.fang@nxp.com>
Cc: shenwei.wang@nxp.com, xiaoning.wang@nxp.com,
andrew+netdev@lunn.ch, davem@davemloft.net, edumazet@google.com,
kuba@kernel.org, pabeni@redhat.com, ast@kernel.org,
daniel@iogearbox.net, hawk@kernel.org, john.fastabend@gmail.com,
sdf@fomichev.me, netdev@vger.kernel.org,
linux-kernel@vger.kernel.org, imx@lists.linux.dev,
bpf@vger.kernel.org
Subject: Re: [PATCH v2 net-next 12/14] net: fec: add fec_alloc_rxq_buffers_pp() to allocate buffers from page pool
Date: Fri, 16 Jan 2026 09:38:42 -0500 [thread overview]
Message-ID: <aWpNcsoiQX7WESis@lizhi-Precision-Tower-5810> (raw)
In-Reply-To: <20260116074027.1603841-13-wei.fang@nxp.com>
On Fri, Jan 16, 2026 at 03:40:25PM +0800, Wei Fang wrote:
> Currently, the buffers of RX queue are allocated from the page pool. In
> the subsequent patches to support XDP zero copy, the RX buffers will be
> allocated from the UMEM. Therefore, extract fec_alloc_rxq_buffers_pp()
> from fec_enet_alloc_rxq_buffers() and we will add another helper to
> allocate RX buffers from UMEM for the XDP zero copy mode.
>
> Signed-off-by: Wei Fang <wei.fang@nxp.com>
> ---
> drivers/net/ethernet/freescale/fec_main.c | 78 ++++++++++++++++-------
> 1 file changed, 54 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
> index a418f0153d43..68aa94dd9487 100644
> --- a/drivers/net/ethernet/freescale/fec_main.c
> +++ b/drivers/net/ethernet/freescale/fec_main.c
> @@ -3435,6 +3435,24 @@ static void fec_xdp_rxq_info_unreg(struct fec_enet_priv_rx_q *rxq)
> }
> }
>
> +static void fec_free_rxq_buffers(struct fec_enet_priv_rx_q *rxq)
> +{
> + int i;
> +
> + for (i = 0; i < rxq->bd.ring_size; i++) {
> + struct page *page = rxq->rx_buf[i];
> +
> + if (!page)
> + continue;
> +
> + page_pool_put_full_page(rxq->page_pool, page, false);
> + rxq->rx_buf[i] = NULL;
> + }
> +
> + page_pool_destroy(rxq->page_pool);
> + rxq->page_pool = NULL;
> +}
> +
> static void fec_enet_free_buffers(struct net_device *ndev)
> {
> struct fec_enet_private *fep = netdev_priv(ndev);
> @@ -3448,16 +3466,10 @@ static void fec_enet_free_buffers(struct net_device *ndev)
> rxq = fep->rx_queue[q];
>
> fec_xdp_rxq_info_unreg(rxq);
> -
> - for (i = 0; i < rxq->bd.ring_size; i++)
> - page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
> - false);
> + fec_free_rxq_buffers(rxq);
>
> for (i = 0; i < XDP_STATS_TOTAL; i++)
> rxq->stats[i] = 0;
> -
> - page_pool_destroy(rxq->page_pool);
> - rxq->page_pool = NULL;
> }
>
> for (q = 0; q < fep->num_tx_queues; q++) {
> @@ -3556,22 +3568,18 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
> return ret;
> }
>
> -static int
> -fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
> +static int fec_alloc_rxq_buffers_pp(struct fec_enet_private *fep,
> + struct fec_enet_priv_rx_q *rxq)
> {
> - struct fec_enet_private *fep = netdev_priv(ndev);
> - struct fec_enet_priv_rx_q *rxq;
> + struct bufdesc *bdp = rxq->bd.base;
> dma_addr_t phys_addr;
> - struct bufdesc *bdp;
> struct page *page;
> int i, err;
>
> - rxq = fep->rx_queue[queue];
> - bdp = rxq->bd.base;
> -
> err = fec_enet_create_page_pool(fep, rxq);
> if (err < 0) {
> - netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
> + netdev_err(fep->netdev, "%s failed queue %d (%d)\n",
> + __func__, rxq->bd.qid, err);
> return err;
> }
>
> @@ -3590,8 +3598,10 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
>
> for (i = 0; i < rxq->bd.ring_size; i++) {
> page = page_pool_dev_alloc_pages(rxq->page_pool);
> - if (!page)
> - goto err_alloc;
> + if (!page) {
> + err = -ENOMEM;
> + goto free_rx_buffers;
Looks like this part is a bug fix — the original code missed setting err to -ENOMEM here.
> + }
>
> phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
> bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
> @@ -3601,6 +3611,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
>
> if (fep->bufdesc_ex) {
> struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
> +
unnecessary change
Frank
> ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
> }
>
> @@ -3611,15 +3622,34 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
> bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
> bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
>
> - err = fec_xdp_rxq_info_reg(fep, rxq);
> + return 0;
> +
> +free_rx_buffers:
> + fec_free_rxq_buffers(rxq);
> +
> + return err;
> +}
> +
> +static int
> +fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
> +{
> + struct fec_enet_private *fep = netdev_priv(ndev);
> + struct fec_enet_priv_rx_q *rxq;
> + int err;
> +
> + rxq = fep->rx_queue[queue];
> + err = fec_alloc_rxq_buffers_pp(fep, rxq);
> if (err)
> - goto err_alloc;
> + return err;
>
> - return 0;
> + err = fec_xdp_rxq_info_reg(fep, rxq);
> + if (err) {
> + fec_free_rxq_buffers(rxq);
>
> - err_alloc:
> - fec_enet_free_buffers(ndev);
> - return -ENOMEM;
> + return err;
> + }
> +
> + return 0;
> }
>
> static int
> --
> 2.34.1
>
next prev parent reply other threads:[~2026-01-16 14:38 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-16 7:40 [PATCH v2 net-next 00/14] net: fec: improve XDP copy mode and add AF_XDP zero-copy support Wei Fang
2026-01-16 7:40 ` [PATCH v2 net-next 01/14] net: fec: add fec_txq_trigger_xmit() helper Wei Fang
2026-01-16 7:40 ` [PATCH v2 net-next 02/14] net: fec: add fec_rx_error_check() to check RX errors Wei Fang
2026-01-16 7:40 ` [PATCH v2 net-next 03/14] net: fec: add rx_shift to indicate the extra bytes padded in front of RX frame Wei Fang
2026-01-16 7:40 ` [PATCH v2 net-next 04/14] net: fec: add fec_build_skb() to build a skb Wei Fang
2026-01-16 14:01 ` Frank Li
2026-01-16 7:40 ` [PATCH v2 net-next 05/14] net: fec: improve fec_enet_rx_queue() Wei Fang
2026-01-16 14:03 ` Frank Li
2026-01-16 7:40 ` [PATCH v2 net-next 06/14] net: fec: add fec_enet_rx_queue_xdp() for XDP path Wei Fang
2026-01-16 14:19 ` Frank Li
2026-01-17 2:32 ` Wei Fang
2026-01-16 7:40 ` [PATCH v2 net-next 07/14] net: fec: transmit XDP frames in bulk Wei Fang
2026-01-16 14:04 ` Frank Li
2026-01-16 7:40 ` [PATCH v2 net-next 08/14] net: fec: remove unnecessary NULL pointer check when clearing TX BD ring Wei Fang
2026-01-16 14:21 ` Frank Li
2026-01-16 7:40 ` [PATCH v2 net-next 09/14] net: fec: use switch statement to check the type of tx_buf Wei Fang
2026-01-16 7:40 ` [PATCH v2 net-next 10/14] net: fec: remove the size parameter from fec_enet_create_page_pool() Wei Fang
2026-01-16 14:26 ` Frank Li
2026-01-16 7:40 ` [PATCH v2 net-next 11/14] net: fec: move xdp_rxq_info* APIs out of fec_enet_create_page_pool() Wei Fang
2026-01-16 14:34 ` Frank Li
2026-01-20 6:39 ` Wei Fang
2026-01-16 7:40 ` [PATCH v2 net-next 12/14] net: fec: add fec_alloc_rxq_buffers_pp() to allocate buffers from page pool Wei Fang
2026-01-16 14:38 ` Frank Li [this message]
2026-01-17 2:16 ` Wei Fang
2026-01-20 7:30 ` Wei Fang
2026-01-16 7:40 ` [PATCH v2 net-next 13/14] net: fec: improve fec_enet_tx_queue() Wei Fang
2026-01-16 14:40 ` Frank Li
2026-01-16 7:40 ` [PATCH v2 net-next 14/14] net: fec: add AF_XDP zero-copy support Wei Fang
2026-01-16 12:28 ` kernel test robot
2026-01-16 14:47 ` Frank Li
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=aWpNcsoiQX7WESis@lizhi-Precision-Tower-5810 \
--to=frank.li@nxp.com \
--cc=andrew+netdev@lunn.ch \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=hawk@kernel.org \
--cc=imx@lists.linux.dev \
--cc=john.fastabend@gmail.com \
--cc=kuba@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=sdf@fomichev.me \
--cc=shenwei.wang@nxp.com \
--cc=wei.fang@nxp.com \
--cc=xiaoning.wang@nxp.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox