From: Paolo Valerio <pvalerio@redhat.com>
To: netdev@vger.kernel.org
Cc: "Nicolas Ferre" <nicolas.ferre@microchip.com>,
"Claudiu Beznea" <claudiu.beznea@tuxon.dev>,
"Andrew Lunn" <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
"Jakub Kicinski" <kuba@kernel.org>,
"Paolo Abeni" <pabeni@redhat.com>,
"Lorenzo Bianconi" <lorenzo@kernel.org>,
"Théo Lebrun" <theo.lebrun@bootlin.com>
Subject: [PATCH net-next v2 8/8] net: macb: introduce xmit support
Date: Mon, 23 Feb 2026 19:26:32 +0100 [thread overview]
Message-ID: <20260223182632.1681809-9-pvalerio@redhat.com> (raw)
In-Reply-To: <20260223182632.1681809-1-pvalerio@redhat.com>
Add XDP_TX verdict support and introduce an ndo_xdp_xmit callback for
frame redirection. Update macb_tx_unmap() to handle both skbs and xdp
frames, and advertise the NETDEV_XDP_ACT_NDO_XMIT capability along with
the ability to process XDP_TX verdicts.
Signed-off-by: Paolo Valerio <pvalerio@redhat.com>
---
drivers/net/ethernet/cadence/macb_main.c | 171 +++++++++++++++++++++--
1 file changed, 162 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 50646ee90672..69392ec0065f 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -967,6 +967,17 @@ static int macb_halt_tx(struct macb *bp)
bp, TSR);
}
+static void macb_tx_release_buff(void *buff, enum macb_tx_buff_type type, int budget)
+{
+ if (type == MACB_TYPE_SKB) {
+ napi_consume_skb(buff, budget);
+ } else if (type == MACB_TYPE_XDP_TX) {
+ xdp_return_frame_rx_napi(buff);
+ } else {
+ xdp_return_frame(buff);
+ }
+}
+
static void macb_tx_unmap(struct macb *bp, struct macb_tx_buff *tx_buff,
int budget)
{
@@ -981,7 +992,7 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_buff *tx_buff,
}
if (tx_buff->ptr) {
- napi_consume_skb(tx_buff->ptr, budget);
+ macb_tx_release_buff(tx_buff->ptr, tx_buff->type, budget);
tx_buff->ptr = NULL;
}
}
@@ -1069,6 +1080,10 @@ static void macb_tx_error_task(struct work_struct *work)
desc = macb_tx_desc(queue, tail);
ctrl = desc->ctrl;
tx_buff = macb_tx_buff(queue, tail);
+
+ if (tx_buff->type != MACB_TYPE_SKB)
+ goto unmap;
+
skb = tx_buff->ptr;
if (ctrl & MACB_BIT(TX_USED)) {
@@ -1106,6 +1121,7 @@ static void macb_tx_error_task(struct work_struct *work)
desc->ctrl = ctrl | MACB_BIT(TX_USED);
}
+unmap:
macb_tx_unmap(bp, tx_buff, 0);
}
@@ -1184,6 +1200,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
spin_lock_irqsave(&queue->tx_ptr_lock, flags);
head = queue->tx_head;
for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
+ void *data = NULL;
struct macb_tx_buff *tx_buff;
struct sk_buff *skb;
struct macb_dma_desc *desc;
@@ -1206,11 +1223,16 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
for (;; tail++) {
tx_buff = macb_tx_buff(queue, tail);
- if (tx_buff->type == MACB_TYPE_SKB)
- skb = tx_buff->ptr;
+ if (tx_buff->type != MACB_TYPE_SKB) {
+ data = tx_buff->ptr;
+ goto unmap;
+ }
/* First, update TX stats if needed */
- if (skb) {
+ if (tx_buff->type == MACB_TYPE_SKB && tx_buff->ptr) {
+ data = tx_buff->ptr;
+ skb = tx_buff->ptr;
+
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
!ptp_one_step_sync(skb))
gem_ptp_do_txstamp(bp, skb, desc);
@@ -1226,6 +1248,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
bytes += skb->len;
}
+unmap:
/* Now we can safely release resources */
macb_tx_unmap(bp, tx_buff, budget);
@@ -1233,7 +1256,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
* WARNING: at this point skb has been freed by
* macb_tx_unmap().
*/
- if (skb)
+ if (data)
break;
}
}
@@ -1363,10 +1386,127 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
*/
}
+static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map,
+ dma_addr_t addr)
+{
+ enum macb_tx_buff_type buff_type;
+ struct macb_tx_buff *tx_buff;
+ int cpu = smp_processor_id();
+ struct macb_dma_desc *desc;
+ struct macb_queue *queue;
+ unsigned int next_head;
+ unsigned long flags;
+ dma_addr_t mapping;
+ u16 queue_index;
+ int err = 0;
+ u32 ctrl;
+
+ queue_index = cpu % bp->num_queues;
+ queue = &bp->queues[queue_index];
+ buff_type = dma_map ? MACB_TYPE_XDP_NDO : MACB_TYPE_XDP_TX;
+
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
+
+ /* This is a hard error, log it. */
+ if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) {
+ netif_stop_subqueue(dev, queue_index);
+ netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
+ queue->tx_head, queue->tx_tail);
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (dma_map) {
+ mapping = dma_map_single(&bp->pdev->dev,
+ xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+ } else {
+ mapping = addr;
+ dma_sync_single_for_device(&bp->pdev->dev, mapping,
+ xdpf->len, DMA_BIDIRECTIONAL);
+ }
+
+ next_head = queue->tx_head + 1;
+
+ ctrl = MACB_BIT(TX_USED);
+ desc = macb_tx_desc(queue, next_head);
+ desc->ctrl = ctrl;
+
+ desc = macb_tx_desc(queue, queue->tx_head);
+ tx_buff = macb_tx_buff(queue, queue->tx_head);
+ tx_buff->ptr = xdpf;
+ tx_buff->type = buff_type;
+ tx_buff->mapping = dma_map ? mapping : 0;
+ tx_buff->size = xdpf->len;
+ tx_buff->mapped_as_page = false;
+
+ ctrl = (u32)tx_buff->size;
+ ctrl |= MACB_BIT(TX_LAST);
+
+ if (unlikely(macb_tx_ring_wrap(bp, queue->tx_head) == (bp->tx_ring_size - 1)))
+ ctrl |= MACB_BIT(TX_WRAP);
+
+ /* Set TX buffer descriptor */
+ macb_set_addr(bp, desc, mapping);
+ /* desc->addr must be visible to hardware before clearing
+ * 'TX_USED' bit in desc->ctrl.
+ */
+ wmb();
+ desc->ctrl = ctrl;
+ queue->tx_head = next_head;
+
+ /* Make newly initialized descriptor visible to hardware */
+ wmb();
+
+ spin_lock(&bp->lock);
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ spin_unlock(&bp->lock);
+
+ if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
+ netif_stop_subqueue(dev, queue_index);
+
+unlock:
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+
+ if (err)
+ macb_tx_release_buff(xdpf, buff_type, 0);
+
+ return err;
+}
+
+static int gem_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct macb *bp = netdev_priv(dev);
+ u32 xmitted = 0;
+ int i;
+
+ if (!macb_is_gem(bp))
+ return -EOPNOTSUPP;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < num_frame; i++) {
+ if (macb_xdp_submit_frame(bp, frames[i], dev, true, 0))
+ break;
+
+ xmitted++;
+ }
+
+ return xmitted;
+}
+
static u32 gem_xdp_run(struct macb_queue *queue, void *buff_head,
- unsigned int len)
+ unsigned int len, dma_addr_t addr)
{
struct net_device *dev;
+ struct xdp_frame *xdpf;
struct bpf_prog *prog;
struct xdp_buff xdp;
@@ -1393,6 +1533,18 @@ static u32 gem_xdp_run(struct macb_queue *queue, void *buff_head,
break;
}
goto out;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+
+ if (unlikely(!xdpf)) {
+ act = XDP_DROP;
+ break;
+ }
+
+ if (macb_xdp_submit_frame(queue->bp, xdpf, dev, false,
+ addr))
+ act = XDP_DROP;
+ goto out;
default:
bpf_warn_invalid_xdp_action(dev, prog, act);
fallthrough;
@@ -1480,7 +1632,7 @@ static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
if (first_frame) {
if (last_frame) {
- ret = gem_xdp_run(queue, buff_head, data_len);
+ ret = gem_xdp_run(queue, buff_head, data_len, addr);
if (ret == XDP_REDIRECT)
xdp_flush = true;
@@ -4620,6 +4772,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_hwtstamp_get = macb_hwtstamp_get,
.ndo_setup_tc = macb_setup_tc,
.ndo_bpf = gem_xdp,
+ .ndo_xdp_xmit = gem_xdp_xmit,
};
/* Configure peripheral capabilities according to device tree
@@ -5920,8 +6073,8 @@ static int macb_probe(struct platform_device *pdev)
if (macb_is_gem(bp))
dev->xdp_features = NETDEV_XDP_ACT_BASIC |
- NETDEV_XDP_ACT_REDIRECT;
-
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
netif_carrier_off(dev);
err = register_netdev(dev);
--
2.52.0
next prev parent reply other threads:[~2026-02-23 18:27 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-23 18:26 [PATCH net-next v2 0/8] net: macb: Add XDP support and page pool integration Paolo Valerio
2026-02-23 18:26 ` [PATCH net-next v2 1/8] net: macb: move Rx buffers alloc from link up to open Paolo Valerio
2026-02-24 0:08 ` [net-next,v2,1/8] " Jakub Kicinski
2026-02-25 18:29 ` Paolo Valerio
2026-02-23 18:26 ` [PATCH net-next v2 2/8] net: macb: rename rx_skbuff into rx_buff Paolo Valerio
2026-02-23 18:26 ` [PATCH net-next v2 3/8] net: macb: Add page pool support handle multi-descriptor frame rx Paolo Valerio
2026-02-23 18:26 ` [PATCH net-next v2 4/8] net: macb: use the current queue number for stats Paolo Valerio
2026-02-23 18:26 ` [PATCH net-next v2 5/8] net: macb: add XDP support for gem Paolo Valerio
2026-02-23 23:23 ` kernel test robot
2026-02-24 0:08 ` [net-next,v2,5/8] " Jakub Kicinski
2026-02-25 18:30 ` Paolo Valerio
2026-02-27 10:52 ` Théo Lebrun
2026-02-28 13:49 ` Claudiu Beznea
2026-02-23 18:26 ` [PATCH net-next v2 6/8] net: macb: make macb_tx_skb generic Paolo Valerio
2026-02-24 0:08 ` [net-next,v2,6/8] " Jakub Kicinski
2026-02-23 18:26 ` [PATCH net-next v2 7/8] net: macb: make tx path skb agnostic Paolo Valerio
2026-02-24 0:09 ` [net-next,v2,7/8] " Jakub Kicinski
2026-02-25 18:36 ` Paolo Valerio
2026-02-23 18:26 ` Paolo Valerio [this message]
2026-02-24 0:09 ` [net-next,v2,8/8] net: macb: introduce xmit support Jakub Kicinski
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260223182632.1681809-9-pvalerio@redhat.com \
--to=pvalerio@redhat.com \
--cc=andrew+netdev@lunn.ch \
--cc=claudiu.beznea@tuxon.dev \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=kuba@kernel.org \
--cc=lorenzo@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=nicolas.ferre@microchip.com \
--cc=pabeni@redhat.com \
--cc=theo.lebrun@bootlin.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox