From: Wei Fang <wei.fang@nxp.com>
To: shenwei.wang@nxp.com, xiaoning.wang@nxp.com, frank.li@nxp.com,
andrew+netdev@lunn.ch, davem@davemloft.net, edumazet@google.com,
kuba@kernel.org, pabeni@redhat.com, ast@kernel.org,
daniel@iogearbox.net, hawk@kernel.org, john.fastabend@gmail.com,
sdf@fomichev.me
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
imx@lists.linux.dev, bpf@vger.kernel.org
Subject: [PATCH net-next 07/11] net: fec: use switch statement to check the type of tx_buf
Date: Tue, 13 Jan 2026 11:29:35 +0800 [thread overview]
Message-ID: <20260113032939.3705137-8-wei.fang@nxp.com> (raw)
In-Reply-To: <20260113032939.3705137-1-wei.fang@nxp.com>
The tx_buf has three types: FEC_TXBUF_T_SKB, FEC_TXBUF_T_XDP_NDO and
FEC_TXBUF_T_XDP_TX. Currently, the driver uses 'if...else...' statements
to check the type and perform the corresponding processing, which is
detrimental to future extension. For example, if new types are added to
support XDP zero-copy in the future, continuing to extend the
'if...else...' chain would result in poor coding style. Therefore,
replace the 'if...else...' statements with switch statements to prepare
for future XDP zero-copy support.
Signed-off-by: Wei Fang <wei.fang@nxp.com>
---
drivers/net/ethernet/freescale/fec_main.c | 167 +++++++++++-----------
1 file changed, 82 insertions(+), 85 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f3e93598a27c..3bd89d7f105b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1023,33 +1023,33 @@ static void fec_enet_bd_init(struct net_device *dev)
txq->bd.cur = bdp;
for (i = 0; i < txq->bd.ring_size; i++) {
+ dma_addr_t dma = fec32_to_cpu(bdp->cbd_bufaddr);
+ struct page *page;
+
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = cpu_to_fec16(0);
- if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
- if (bdp->cbd_bufaddr &&
- !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
- if (txq->tx_buf[i].buf_p)
- dev_kfree_skb_any(txq->tx_buf[i].buf_p);
- } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
- if (bdp->cbd_bufaddr)
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
+ switch (txq->tx_buf[i].type) {
+ case FEC_TXBUF_T_SKB:
+ if (dma && !IS_TSO_HEADER(txq, dma))
+ dma_unmap_single(&fep->pdev->dev, dma,
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
- if (txq->tx_buf[i].buf_p)
- xdp_return_frame(txq->tx_buf[i].buf_p);
- } else {
- struct page *page = txq->tx_buf[i].buf_p;
-
- if (page)
- page_pool_put_page(pp_page_to_nmdesc(page)->pp,
- page, 0,
- false);
+ dev_kfree_skb_any(txq->tx_buf[i].buf_p);
+ break;
+ case FEC_TXBUF_T_XDP_NDO:
+ dma_unmap_single(&fep->pdev->dev, dma,
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+ xdp_return_frame(txq->tx_buf[i].buf_p);
+ break;
+ case FEC_TXBUF_T_XDP_TX:
+ page = txq->tx_buf[i].buf_p;
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0, false);
+ break;
+ default:
+ break;
}
txq->tx_buf[i].buf_p = NULL;
@@ -1514,45 +1514,66 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
break;
index = fec_enet_get_bd_index(bdp, &txq->bd);
+ frame_len = fec16_to_cpu(bdp->cbd_datlen);
- if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
- skb = txq->tx_buf[index].buf_p;
+ switch (txq->tx_buf[index].type) {
+ case FEC_TXBUF_T_SKB:
if (bdp->cbd_bufaddr &&
!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
- bdp->cbd_bufaddr = cpu_to_fec32(0);
+ frame_len, DMA_TO_DEVICE);
+
+ skb = txq->tx_buf[index].buf_p;
if (!skb)
goto tx_buf_done;
- } else {
+
+ frame_len = skb->len;
+
+ /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
+ * are to time stamp the packet, so we still need to check time
+ * stamping enabled flag.
+ */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ fep->hwts_tx_en) && fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ struct skb_shared_hwtstamps shhwtstamps;
+
+ fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+ /* Free the sk buffer associated with this last transmit */
+ napi_consume_skb(skb, budget);
+ break;
+ case FEC_TXBUF_T_XDP_NDO:
/* Tx processing cannot call any XDP (or page pool) APIs if
* the "budget" is 0. Because NAPI is called with budget of
* 0 (such as netpoll) indicates we may be in an IRQ context,
* however, we can't use the page pool from IRQ context.
*/
if (unlikely(!budget))
- break;
+ goto out;
- if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
- xdpf = txq->tx_buf[index].buf_p;
- if (bdp->cbd_bufaddr)
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
- } else {
- page = txq->tx_buf[index].buf_p;
- }
-
- bdp->cbd_bufaddr = cpu_to_fec32(0);
- if (unlikely(!txq->tx_buf[index].buf_p)) {
- txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
- goto tx_buf_done;
- }
+ xdpf = txq->tx_buf[index].buf_p;
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ frame_len, DMA_TO_DEVICE);
+ xdp_return_frame_rx_napi(xdpf);
+ break;
+ case FEC_TXBUF_T_XDP_TX:
+ if (unlikely(!budget))
+ goto out;
- frame_len = fec16_to_cpu(bdp->cbd_datlen);
+ page = txq->tx_buf[index].buf_p;
+ /* The dma_sync_size = 0 as XDP_TX has already synced
+ * DMA for_device
+ */
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
+ 0, true);
+ break;
+ default:
+ break;
}
/* Check for errors. */
@@ -1572,11 +1593,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
ndev->stats.tx_carrier_errors++;
} else {
ndev->stats.tx_packets++;
-
- if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
- ndev->stats.tx_bytes += skb->len;
- else
- ndev->stats.tx_bytes += frame_len;
+ ndev->stats.tx_bytes += frame_len;
}
/* Deferred means some collisions occurred during transmit,
@@ -1585,35 +1602,12 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
if (status & BD_ENET_TX_DEF)
ndev->stats.collisions++;
- if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
- /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
- * are to time stamp the packet, so we still need to check time
- * stamping enabled flag.
- */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
- fep->hwts_tx_en) && fep->bufdesc_ex) {
- struct skb_shared_hwtstamps shhwtstamps;
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-
- fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
- skb_tstamp_tx(skb, &shhwtstamps);
- }
-
- /* Free the sk buffer associated with this last transmit */
- napi_consume_skb(skb, budget);
- } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
- xdp_return_frame_rx_napi(xdpf);
- } else { /* recycle pages of XDP_TX frames */
- /* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
- page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
- 0, true);
- }
-
txq->tx_buf[index].buf_p = NULL;
/* restore default tx buffer type: FEC_TXBUF_T_SKB */
txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
tx_buf_done:
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
/* Make sure the update to bdp and tx_buf are performed
* before dirty_tx
*/
@@ -1632,6 +1626,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
}
}
+out:
+
/* ERR006358: Keep the transmitter going */
if (bdp != txq->bd.cur &&
readl(txq->bd.reg_desc_active) == 0)
@@ -3413,6 +3409,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
unsigned int i;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
+ struct page *page;
unsigned int q;
for (q = 0; q < fep->num_rx_queues; q++) {
@@ -3436,20 +3433,20 @@ static void fec_enet_free_buffers(struct net_device *ndev)
kfree(txq->tx_bounce[i]);
txq->tx_bounce[i] = NULL;
- if (!txq->tx_buf[i].buf_p) {
- txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
- continue;
- }
-
- if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+ switch (txq->tx_buf[i].type) {
+ case FEC_TXBUF_T_SKB:
dev_kfree_skb(txq->tx_buf[i].buf_p);
- } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+ break;
+ case FEC_TXBUF_T_XDP_NDO:
xdp_return_frame(txq->tx_buf[i].buf_p);
- } else {
- struct page *page = txq->tx_buf[i].buf_p;
-
+ break;
+ case FEC_TXBUF_T_XDP_TX:
+ page = txq->tx_buf[i].buf_p;
page_pool_put_page(pp_page_to_nmdesc(page)->pp,
page, 0, false);
+ break;
+ default:
+ break;
}
txq->tx_buf[i].buf_p = NULL;
--
2.34.1
next prev parent reply other threads:[~2026-01-13 3:30 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-13 3:29 [PATCH net-next 00/11] net: fec: improve XDP copy mode and add AF_XDP zero-copy support Wei Fang
2026-01-13 3:29 ` [PATCH net-next 01/11] net: fec: add fec_txq_trigger_xmit() helper Wei Fang
2026-01-13 15:52 ` Frank Li
2026-01-13 3:29 ` [PATCH net-next 02/11] net: fec: add fec_rx_error_check() to check RX errors Wei Fang
2026-01-13 15:53 ` Frank Li
2026-01-13 3:29 ` [PATCH net-next 03/11] net: fec: add rx_shift to indicate the extra bytes padded in front of RX frame Wei Fang
2026-01-13 15:56 ` Frank Li
2026-01-13 3:29 ` [PATCH net-next 04/11] net: fec: add fec_build_skb() to build a skb Wei Fang
2026-01-13 15:59 ` Frank Li
2026-01-14 2:32 ` Wei Fang
2026-01-13 3:29 ` [PATCH net-next 05/11] net: fec: add fec_enet_rx_queue_xdp() for XDP path Wei Fang
2026-01-13 16:11 ` Frank Li
2026-01-13 3:29 ` [PATCH net-next 06/11] net: fec: transmit XDP frames in bulk Wei Fang
2026-01-13 16:17 ` Frank Li
2026-01-14 9:57 ` Wei Fang
2026-01-13 3:29 ` Wei Fang [this message]
2026-01-13 16:22 ` [PATCH net-next 07/11] net: fec: use switch statement to check the type of tx_buf Frank Li
2026-01-14 10:01 ` Wei Fang
2026-01-14 13:47 ` David Laight
2026-01-15 2:27 ` Wei Fang
2026-01-13 3:29 ` [PATCH net-next 08/11] net: fec: remove the size parameter from fec_enet_create_page_pool() Wei Fang
2026-01-13 3:29 ` [PATCH net-next 09/11] net: fec: move xdp_rxq_info* APIs out of fec_enet_create_page_pool() Wei Fang
2026-01-13 3:29 ` [PATCH net-next 10/11] net: fec: add fec_alloc_rxq_buffers_pp() to allocate buffers from page pool Wei Fang
2026-01-13 3:29 ` [PATCH net-next 11/11] net: fec: add AF_XDP zero-copy support Wei Fang
2026-01-13 9:29 ` kernel test robot
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260113032939.3705137-8-wei.fang@nxp.com \
--to=wei.fang@nxp.com \
--cc=andrew+netdev@lunn.ch \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=frank.li@nxp.com \
--cc=hawk@kernel.org \
--cc=imx@lists.linux.dev \
--cc=john.fastabend@gmail.com \
--cc=kuba@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=sdf@fomichev.me \
--cc=shenwei.wang@nxp.com \
--cc=xiaoning.wang@nxp.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox