From: Nicolai Buchwitz <nb@tipi-net.de>
To: Andrew Lunn <andrew+netdev@lunn.ch>,
"David S . Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Doug Berger <opendmb@gmail.com>,
Florian Fainelli <florian.fainelli@broadcom.com>
Cc: "Broadcom internal kernel review list"
<bcm-kernel-feedback-list@broadcom.com>,
"Vikas Gupta" <vikas.gupta@broadcom.com>,
"Bhargava Marreddy" <bhargava.marreddy@broadcom.com>,
"Rajashekar Hudumula" <rajashekar.hudumula@broadcom.com>,
"Eric Biggers" <ebiggers@kernel.org>,
"Heiner Kallweit" <hkallweit1@gmail.com>,
"Markus Blöchl" <mbloch@rooftopnetworks.de>,
"Arnd Bergmann" <arnd@arndb.de>,
netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
"Nicolai Buchwitz" <nb@tipi-net.de>
Subject: [PATCH net-next 4/6] net: bcmgenet: add XDP_TX support
Date: Fri, 13 Mar 2026 10:20:59 +0100 [thread overview]
Message-ID: <20260313092101.1344954-5-nb@tipi-net.de> (raw)
In-Reply-To: <20260313092101.1344954-1-nb@tipi-net.de>
Implement XDP_TX by submitting XDP frames through the default TX ring
(DESC_INDEX). The frame is DMA-mapped and placed into a single TX
descriptor with SOP|EOP|APPEND_CRC flags.
The xdp_frame pointer is stored in the TX control block so that
bcmgenet_free_tx_cb() can call xdp_return_frame() on TX completion,
returning the page to the originating page_pool.
The page_pool DMA direction is changed from DMA_FROM_DEVICE to
DMA_BIDIRECTIONAL to support the TX DMA mapping of received pages.
Signed-off-by: Nicolai Buchwitz <nb@tipi-net.de>
---
.../net/ethernet/broadcom/genet/bcmgenet.c | 73 ++++++++++++++++++-
.../net/ethernet/broadcom/genet/bcmgenet.h | 1 +
2 files changed, 71 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index d43729fc2b1b..373ba5878ca1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1893,6 +1893,12 @@ static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
if (cb == GENET_CB(skb)->last_cb)
return skb;
+ } else if (cb->xdpf) {
+ dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ xdp_return_frame(cb->xdpf);
+ cb->xdpf = NULL;
} else if (dma_unmap_addr(cb, dma_addr)) {
dma_unmap_page(dev,
dma_unmap_addr(cb, dma_addr),
@@ -2299,10 +2305,62 @@ static struct sk_buff *bcmgenet_xdp_build_skb(struct bcmgenet_rx_ring *ring,
return skb;
}
+/* Transmit a single XDP frame on the default TX ring (DESC_INDEX).
+ *
+ * The frame is DMA-mapped and posted as one SOP|EOP descriptor with
+ * APPEND_CRC; the xdp_frame pointer is stashed in the TX control block
+ * so bcmgenet_free_tx_cb() can return it via xdp_return_frame() on
+ * completion.
+ *
+ * Returns true if the frame was queued; false if the ring was full or
+ * the DMA mapping failed, in which case the caller retains ownership
+ * of the frame/page and must recycle it.
+ */
+static bool bcmgenet_xdp_xmit_frame(struct bcmgenet_priv *priv,
+ struct xdp_frame *xdpf)
+{
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+ struct device *kdev = &priv->pdev->dev;
+ struct enet_cb *tx_cb_ptr;
+ dma_addr_t mapping;
+ u32 len_stat;
+
+ /* NOTE(review): called from NAPI (softirq) context. Confirm that
+ * plain spin_lock() is sufficient here, i.e. that ring->lock is
+ * never taken from hard-IRQ context and that all other takers
+ * (e.g. the stack xmit path sharing DESC_INDEX) run with BH
+ * disabled — otherwise _bh/_irqsave is needed.
+ */
+ spin_lock(&ring->lock);
+
+ /* Ring full: tell the caller so the page can be recycled. */
+ if (ring->free_bds < 1) {
+ spin_unlock(&ring->lock);
+ return false;
+ }
+
+ tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+ /* NOTE(review): the page backing xdpf->data is presumably already
+ * DMA-mapped by the page_pool (now DMA_BIDIRECTIONAL); mapping it
+ * again here works but a dma_sync_single_for_device() on the pool
+ * mapping would avoid the per-frame map/unmap — worth considering.
+ */
+ mapping = dma_map_single(kdev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(kdev, mapping)) {
+ /* Undo the control-block claim before bailing out. */
+ tx_cb_ptr->skb = NULL;
+ tx_cb_ptr->xdpf = NULL;
+ bcmgenet_put_txcb(priv, ring);
+ spin_unlock(&ring->lock);
+ return false;
+ }
+
+ /* Record unmap info for bcmgenet_free_tx_cb() at TX completion. */
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, xdpf->len);
+ tx_cb_ptr->skb = NULL;
+ tx_cb_ptr->xdpf = xdpf;
+
+ /* Single-buffer frame: SOP and EOP in the same descriptor. */
+ len_stat = (xdpf->len << DMA_BUFLENGTH_SHIFT) |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
+ DMA_TX_APPEND_CRC | DMA_SOP | DMA_EOP;
+
+ dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
+
+ ring->free_bds--;
+ ring->prod_index++;
+ ring->prod_index &= DMA_P_INDEX_MASK;
+
+ /* NOTE(review): one doorbell MMIO write per frame; batching the
+ * producer-index update across a NAPI burst (flush once at poll
+ * end) would cut MMIO traffic on XDP_TX-heavy workloads.
+ */
+ bcmgenet_tdma_ring_writel(priv, ring->index, ring->prod_index,
+ TDMA_PROD_INDEX);
+
+ spin_unlock(&ring->lock);
+
+ return true;
+}
+
static unsigned int
bcmgenet_run_xdp(struct bcmgenet_rx_ring *ring, struct bpf_prog *prog,
struct xdp_buff *xdp, struct page *rx_page)
{
+ struct bcmgenet_priv *priv = ring->priv;
+ struct xdp_frame *xdpf;
unsigned int act;
act = bpf_prog_run_xdp(prog, xdp);
@@ -2310,14 +2368,23 @@ bcmgenet_run_xdp(struct bcmgenet_rx_ring *ring, struct bpf_prog *prog,
switch (act) {
case XDP_PASS:
return XDP_PASS;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf) ||
+ unlikely(!bcmgenet_xdp_xmit_frame(priv, xdpf))) {
+ page_pool_put_full_page(ring->page_pool, rx_page,
+ true);
+ return XDP_DROP;
+ }
+ return XDP_TX;
case XDP_DROP:
page_pool_put_full_page(ring->page_pool, rx_page, true);
return XDP_DROP;
default:
- bpf_warn_invalid_xdp_action(ring->priv->dev, prog, act);
+ bpf_warn_invalid_xdp_action(priv->dev, prog, act);
fallthrough;
case XDP_ABORTED:
- trace_xdp_exception(ring->priv->dev, prog, act);
+ trace_xdp_exception(priv->dev, prog, act);
page_pool_put_full_page(ring->page_pool, rx_page, true);
return XDP_ABORTED;
}
@@ -2846,7 +2913,7 @@ static int bcmgenet_rx_ring_create_pool(struct bcmgenet_priv *priv,
.pool_size = ring->size,
.nid = NUMA_NO_NODE,
.dev = &priv->pdev->dev,
- .dma_dir = DMA_FROM_DEVICE,
+ .dma_dir = DMA_BIDIRECTIONAL,
.offset = GENET_XDP_HEADROOM,
.max_len = RX_BUF_LENGTH,
};
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1459473ac1b0..192db0defbfc 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -472,6 +472,7 @@ struct bcmgenet_rx_stats64 {
struct enet_cb {
struct sk_buff *skb;
+ struct xdp_frame *xdpf;
struct page *rx_page;
unsigned int rx_page_offset;
void __iomem *bd_addr;
--
2.51.0
next prev parent reply other threads:[~2026-03-13 9:21 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-13 9:20 [PATCH net-next 0/6] net: bcmgenet: add XDP support Nicolai Buchwitz
2026-03-13 9:20 ` [PATCH net-next 1/6] net: bcmgenet: convert RX path to page_pool Nicolai Buchwitz
2026-03-13 9:20 ` [PATCH net-next 2/6] net: bcmgenet: register xdp_rxq_info for each RX ring Nicolai Buchwitz
2026-03-13 9:20 ` [PATCH net-next 3/6] net: bcmgenet: add basic XDP support (PASS/DROP) Nicolai Buchwitz
2026-03-13 22:48 ` Florian Fainelli
2026-03-14 19:48 ` Nicolai Buchwitz
2026-03-13 9:20 ` Nicolai Buchwitz [this message]
2026-03-13 11:37 ` [PATCH net-next 4/6] net: bcmgenet: add XDP_TX support Subbaraya Sundeep
2026-03-13 12:45 ` Nicolai Buchwitz
2026-03-13 9:21 ` [PATCH net-next 5/6] net: bcmgenet: add XDP_REDIRECT and ndo_xdp_xmit support Nicolai Buchwitz
2026-03-13 9:21 ` [PATCH net-next 6/6] net: bcmgenet: add XDP statistics counters Nicolai Buchwitz
2026-03-13 23:01 ` [PATCH net-next 0/6] net: bcmgenet: add XDP support Florian Fainelli
2026-03-14 0:13 ` Florian Fainelli
2026-03-14 19:51 ` Nicolai Buchwitz
2026-03-14 15:52 ` Jakub Kicinski
2026-03-14 19:52 ` Nicolai Buchwitz
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260313092101.1344954-5-nb@tipi-net.de \
--to=nb@tipi-net.de \
--cc=andrew+netdev@lunn.ch \
--cc=arnd@arndb.de \
--cc=bcm-kernel-feedback-list@broadcom.com \
--cc=bhargava.marreddy@broadcom.com \
--cc=davem@davemloft.net \
--cc=ebiggers@kernel.org \
--cc=edumazet@google.com \
--cc=florian.fainelli@broadcom.com \
--cc=hkallweit1@gmail.com \
--cc=kuba@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mbloch@rooftopnetworks.de \
--cc=netdev@vger.kernel.org \
--cc=opendmb@gmail.com \
--cc=pabeni@redhat.com \
--cc=rajashekar.hudumula@broadcom.com \
--cc=vikas.gupta@broadcom.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox