From: Wei Fang <wei.fang@nxp.com>
To: shenwei.wang@nxp.com, xiaoning.wang@nxp.com, frank.li@nxp.com,
andrew+netdev@lunn.ch, davem@davemloft.net, edumazet@google.com,
kuba@kernel.org, pabeni@redhat.com, ast@kernel.org,
daniel@iogearbox.net, hawk@kernel.org, john.fastabend@gmail.com,
sdf@fomichev.me, horms@kernel.org
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
imx@lists.linux.dev, bpf@vger.kernel.org
Subject: [PATCH v6 net-next 13/15] net: fec: add fec_alloc_rxq_buffers_pp() to allocate buffers from page pool
Date: Tue, 3 Feb 2026 13:23:27 +0800
Message-ID: <20260203052329.1085444-14-wei.fang@nxp.com>
In-Reply-To: <20260203052329.1085444-1-wei.fang@nxp.com>

Currently, the RX queue buffers are allocated from the page pool. In the
subsequent patches that add XDP zero-copy support, the RX buffers will be
allocated from the UMEM instead. Therefore, extract fec_alloc_rxq_buffers_pp()
from fec_enet_alloc_rxq_buffers(); another helper will be added later to
allocate RX buffers from the UMEM for the XDP zero-copy mode. In addition,
fec_alloc_rxq_buffers_pp() only initializes bdp->cbd_bufaddr and leaves the
other fields of bdp untouched, because they are initialized in
fec_enet_bd_init().

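For illustration only, below is a minimal sketch of how the extracted helper
is expected to be consumed once the zero-copy path lands in patch 15/15. The
rxq->xsk_pool field and the fec_alloc_rxq_buffers_zc() name are assumptions
made up for this sketch, not the actual code from that patch:

	/* Sketch: pick the buffer allocator per RX queue. The page-pool
	 * helper extracted in this patch covers the normal path; a
	 * hypothetical UMEM-backed helper covers XDP zero copy.
	 */
	static int
	fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
	{
		struct fec_enet_private *fep = netdev_priv(ndev);
		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
		int err;

		if (rxq->xsk_pool) /* assumption: set when an XSK pool is bound */
			err = fec_alloc_rxq_buffers_zc(fep, rxq);
		else
			err = fec_alloc_rxq_buffers_pp(fep, rxq);
		if (err)
			goto free_buffers;

		err = fec_xdp_rxq_info_reg(fep, rxq);
		if (err)
			goto free_buffers;

		return 0;

	free_buffers:
		fec_enet_free_buffers(ndev);

		return err;
	}
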
Signed-off-by: Wei Fang <wei.fang@nxp.com>
---
drivers/net/ethernet/freescale/fec_main.c | 90 +++++++++++++++--------
1 file changed, 58 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 31a27f1cbf39..cb03b45b7951 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -990,6 +990,13 @@ static void fec_enet_bd_init(struct net_device *dev)
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
else
bdp->cbd_sc = cpu_to_fec16(0);
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ }
+
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
}
@@ -3435,6 +3442,24 @@ static void fec_xdp_rxq_info_unreg(struct fec_enet_priv_rx_q *rxq)
}
}
+static void fec_free_rxq_buffers(struct fec_enet_priv_rx_q *rxq)
+{
+ int i;
+
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+ struct page *page = rxq->rx_buf[i];
+
+ if (!page)
+ continue;
+
+ page_pool_put_full_page(rxq->page_pool, page, false);
+ rxq->rx_buf[i] = NULL;
+ }
+
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+}
+
static void fec_enet_free_buffers(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -3448,16 +3473,10 @@ static void fec_enet_free_buffers(struct net_device *ndev)
rxq = fep->rx_queue[q];
fec_xdp_rxq_info_unreg(rxq);
-
- for (i = 0; i < rxq->bd.ring_size; i++)
- page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
- false);
+ fec_free_rxq_buffers(rxq);
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
-
- page_pool_destroy(rxq->page_pool);
- rxq->page_pool = NULL;
}
for (q = 0; q < fep->num_tx_queues; q++) {
@@ -3556,22 +3575,18 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
return ret;
}
-static int
-fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
+static int fec_alloc_rxq_buffers_pp(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_enet_priv_rx_q *rxq;
+ struct bufdesc *bdp = rxq->bd.base;
dma_addr_t phys_addr;
- struct bufdesc *bdp;
struct page *page;
int i, err;
- rxq = fep->rx_queue[queue];
- bdp = rxq->bd.base;
-
err = fec_enet_create_page_pool(fep, rxq);
if (err < 0) {
- netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+ netdev_err(fep->netdev, "%s failed queue %d (%d)\n",
+ __func__, rxq->bd.qid, err);
return err;
}
@@ -3590,36 +3605,47 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
for (i = 0; i < rxq->bd.ring_size; i++) {
page = page_pool_dev_alloc_pages(rxq->page_pool);
- if (!page)
- goto err_alloc;
+ if (!page) {
+ err = -ENOMEM;
+ goto free_rx_buffers;
+ }
phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
-
rxq->rx_buf[i] = page;
- bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
-
- if (fep->bufdesc_ex) {
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
- }
-
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
}
- /* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
+ return 0;
+
+free_rx_buffers:
+ fec_free_rxq_buffers(rxq);
+
+ return err;
+}
+
+static int
+fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_rx_q *rxq;
+ int err;
+
+ rxq = fep->rx_queue[queue];
+ err = fec_alloc_rxq_buffers_pp(fep, rxq);
+ if (err)
+ goto free_buffers;
err = fec_xdp_rxq_info_reg(fep, rxq);
if (err)
- goto err_alloc;
+ goto free_buffers;
return 0;
- err_alloc:
+free_buffers:
fec_enet_free_buffers(ndev);
- return -ENOMEM;
+
+ return err;
}
static int
--
2.34.1