From: Paolo Valerio <pvalerio@redhat.com>
To: netdev@vger.kernel.org
Cc: "Nicolas Ferre" <nicolas.ferre@microchip.com>,
"Claudiu Beznea" <claudiu.beznea@tuxon.dev>,
"Andrew Lunn" <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
"Jakub Kicinski" <kuba@kernel.org>,
"Paolo Abeni" <pabeni@redhat.com>,
"Lorenzo Bianconi" <lorenzo@kernel.org>,
"Théo Lebrun" <theo.lebrun@bootlin.com>
Subject: [PATCH net-next 8/8] cadence: macb: introduce xmit support
Date: Thu, 15 Jan 2026 23:25:31 +0100 [thread overview]
Message-ID: <20260115222531.313002-9-pvalerio@redhat.com> (raw)
In-Reply-To: <20260115222531.313002-1-pvalerio@redhat.com>
Add XDP_TX verdict support and introduce an ndo_xdp_xmit callback for
frame redirection. Update macb_tx_unmap() to handle both skbs and xdp
frames, and advertise the NETDEV_XDP_ACT_NDO_XMIT capability together
with the ability to process XDP_TX verdicts.
Signed-off-by: Paolo Valerio <pvalerio@redhat.com>
---
drivers/net/ethernet/cadence/macb_main.c | 165 +++++++++++++++++++++--
1 file changed, 157 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index afd8c0f2d895..32f8629bcb25 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -969,6 +969,17 @@ static int macb_halt_tx(struct macb *bp)
bp, TSR);
}
+static void release_buff(void *buff, enum macb_tx_buff_type type, int budget)
+{
+ if (type == MACB_TYPE_SKB) {
+ napi_consume_skb(buff, budget);
+ } else if (type == MACB_TYPE_XDP_TX) {
+ xdp_return_frame_rx_napi(buff);
+ } else {
+ xdp_return_frame(buff);
+ }
+}
+
static void macb_tx_unmap(struct macb *bp, struct macb_tx_buff *tx_buff,
int budget)
{
@@ -983,7 +994,7 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_buff *tx_buff,
}
if (tx_buff->ptr) {
- napi_consume_skb(tx_buff->ptr, budget);
+ release_buff(tx_buff->ptr, tx_buff->type, budget);
tx_buff->ptr = NULL;
}
}
@@ -1071,6 +1082,10 @@ static void macb_tx_error_task(struct work_struct *work)
desc = macb_tx_desc(queue, tail);
ctrl = desc->ctrl;
tx_buff = macb_tx_buff(queue, tail);
+
+ if (tx_buff->type != MACB_TYPE_SKB)
+ goto unmap;
+
skb = tx_buff->ptr;
if (ctrl & MACB_BIT(TX_USED)) {
@@ -1108,6 +1123,7 @@ static void macb_tx_error_task(struct work_struct *work)
desc->ctrl = ctrl | MACB_BIT(TX_USED);
}
+unmap:
macb_tx_unmap(bp, tx_buff, 0);
}
@@ -1186,6 +1202,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
spin_lock_irqsave(&queue->tx_ptr_lock, flags);
head = queue->tx_head;
for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
+ void *data = NULL;
struct macb_tx_buff *tx_buff;
struct sk_buff *skb;
struct macb_dma_desc *desc;
@@ -1208,11 +1225,16 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
for (;; tail++) {
tx_buff = macb_tx_buff(queue, tail);
- if (tx_buff->type == MACB_TYPE_SKB)
- skb = tx_buff->ptr;
+ if (tx_buff->type != MACB_TYPE_SKB) {
+ data = tx_buff->ptr;
+ goto unmap;
+ }
/* First, update TX stats if needed */
- if (skb) {
+ if (tx_buff->type == MACB_TYPE_SKB && tx_buff->ptr) {
+ data = tx_buff->ptr;
+ skb = tx_buff->ptr;
+
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
!ptp_one_step_sync(skb))
gem_ptp_do_txstamp(bp, skb, desc);
@@ -1228,6 +1250,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
bytes += skb->len;
}
+unmap:
/* Now we can safely release resources */
macb_tx_unmap(bp, tx_buff, budget);
@@ -1235,7 +1258,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
* WARNING: at this point skb has been freed by
* macb_tx_unmap().
*/
- if (skb)
+ if (data)
break;
}
}
@@ -1350,10 +1373,127 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
*/
}
+static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map,
+ dma_addr_t addr)
+{
+ enum macb_tx_buff_type buff_type;
+ struct macb_tx_buff *tx_buff;
+ int cpu = smp_processor_id();
+ struct macb_dma_desc *desc;
+ struct macb_queue *queue;
+ unsigned int next_head;
+ unsigned long flags;
+ dma_addr_t mapping;
+ u16 queue_index;
+ int err = 0;
+ u32 ctrl;
+
+ queue_index = cpu % bp->num_queues;
+ queue = &bp->queues[queue_index];
+ buff_type = dma_map ? MACB_TYPE_XDP_NDO : MACB_TYPE_XDP_TX;
+
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
+
+ /* This is a hard error, log it. */
+ if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) {
+ netif_stop_subqueue(dev, queue_index);
+ netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
+ queue->tx_head, queue->tx_tail);
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (dma_map) {
+ mapping = dma_map_single(&bp->pdev->dev,
+ xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+ } else {
+ mapping = addr;
+ dma_sync_single_for_device(&bp->pdev->dev, mapping,
+ xdpf->len, DMA_BIDIRECTIONAL);
+ }
+
+ next_head = queue->tx_head + 1;
+
+ ctrl = MACB_BIT(TX_USED);
+ desc = macb_tx_desc(queue, next_head);
+ desc->ctrl = ctrl;
+
+ desc = macb_tx_desc(queue, queue->tx_head);
+ tx_buff = macb_tx_buff(queue, queue->tx_head);
+ tx_buff->ptr = xdpf;
+ tx_buff->type = buff_type;
+ tx_buff->mapping = mapping;
+ tx_buff->size = xdpf->len;
+ tx_buff->mapped_as_page = false;
+
+ ctrl = (u32)tx_buff->size;
+ ctrl |= MACB_BIT(TX_LAST);
+
+ if (unlikely(macb_tx_ring_wrap(bp, queue->tx_head) == (bp->tx_ring_size - 1)))
+ ctrl |= MACB_BIT(TX_WRAP);
+
+ /* Set TX buffer descriptor */
+ macb_set_addr(bp, desc, tx_buff->mapping);
+ /* desc->addr must be visible to hardware before clearing
+ * 'TX_USED' bit in desc->ctrl.
+ */
+ wmb();
+ desc->ctrl = ctrl;
+ queue->tx_head = next_head;
+
+ /* Make newly initialized descriptor visible to hardware */
+ wmb();
+
+ spin_lock(&bp->lock);
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ spin_unlock(&bp->lock);
+
+ if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
+ netif_stop_subqueue(dev, queue_index);
+
+unlock:
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+
+ if (err)
+ release_buff(xdpf, buff_type, 0);
+
+ return err;
+}
+
+static int gem_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct macb *bp = netdev_priv(dev);
+ u32 xmitted = 0;
+ int i;
+
+ if (!macb_is_gem(bp))
+ return -EOPNOTSUPP;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < num_frame; i++) {
+ if (macb_xdp_submit_frame(bp, frames[i], dev, true, 0))
+ break;
+
+ xmitted++;
+ }
+
+ return xmitted;
+}
+
static u32 gem_xdp_run(struct macb_queue *queue, void *buff_head,
- unsigned int len)
+ unsigned int len, dma_addr_t addr)
{
struct net_device *dev;
+ struct xdp_frame *xdpf;
struct bpf_prog *prog;
struct xdp_buff xdp;
@@ -1380,6 +1520,13 @@ static u32 gem_xdp_run(struct macb_queue *queue, void *buff_head,
break;
}
goto out;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+
+ if (!xdpf || macb_xdp_submit_frame(queue->bp, xdpf, dev, false,
+ addr))
+ act = XDP_DROP;
+ goto out;
default:
bpf_warn_invalid_xdp_action(dev, prog, act);
fallthrough;
@@ -1467,7 +1614,7 @@ static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
if (!(first_frame && last_frame))
goto skip_xdp;
- ret = gem_xdp_run(queue, buff_head, data_len);
+ ret = gem_xdp_run(queue, buff_head, data_len, addr);
if (ret == XDP_REDIRECT)
xdp_flush = true;
@@ -4580,6 +4727,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_hwtstamp_get = macb_hwtstamp_get,
.ndo_setup_tc = macb_setup_tc,
.ndo_bpf = gem_xdp,
+ .ndo_xdp_xmit = gem_xdp_xmit,
};
/* Configure peripheral capabilities according to device tree
@@ -5888,7 +6036,8 @@ static int macb_probe(struct platform_device *pdev)
bp->rx_headroom += NET_IP_ALIGN;
dev->xdp_features = NETDEV_XDP_ACT_BASIC |
- NETDEV_XDP_ACT_REDIRECT;
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
}
netif_carrier_off(dev);
--
2.52.0
next prev parent reply other threads:[~2026-01-15 22:26 UTC|newest]
Thread overview: 39+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-15 22:25 [PATCH net-next 0/8] net: macb: Add XDP support and page pool integration Paolo Valerio
2026-01-15 22:25 ` [PATCH net-next 1/8] net: macb: move Rx buffers alloc from link up to open Paolo Valerio
2026-01-15 22:25 ` [PATCH net-next 2/8] net: macb: rename rx_skbuff into rx_buff Paolo Valerio
2026-01-15 22:25 ` [PATCH net-next 3/8] cadence: macb: Add page pool support handle multi-descriptor frame rx Paolo Valerio
2026-01-16 17:16 ` Andrew Lunn
2026-01-19 18:58 ` Paolo Valerio
2026-01-22 22:24 ` Paolo Valerio
2026-01-22 23:04 ` Andrew Lunn
2026-01-25 19:02 ` Paolo Valerio
2026-01-26 14:29 ` Andrew Lunn
2026-01-26 18:45 ` Théo Lebrun
2026-01-26 23:51 ` Paolo Valerio
2026-01-27 15:48 ` Théo Lebrun
2026-01-26 23:34 ` Paolo Valerio
2026-01-19 19:36 ` [net-next,3/8] " Jakub Kicinski
2026-01-22 14:39 ` Théo Lebrun
2026-01-22 15:16 ` Jakub Kicinski
2026-01-26 14:55 ` [PATCH net-next 3/8] " Théo Lebrun
2026-02-20 15:45 ` Théo Lebrun
2026-01-15 22:25 ` [PATCH net-next 4/8] cadence: macb: use the current queue number for stats Paolo Valerio
2026-01-15 22:25 ` [PATCH net-next 5/8] cadence: macb: add XDP support for gem Paolo Valerio
2026-01-19 19:36 ` [net-next,5/8] " Jakub Kicinski
2026-01-15 22:25 ` [PATCH net-next 6/8] cadence: macb: make macb_tx_skb generic Paolo Valerio
2026-01-15 22:25 ` [PATCH net-next 7/8] cadence: macb: make tx path skb agnostic Paolo Valerio
2026-01-15 22:25 ` Paolo Valerio [this message]
2026-01-19 19:36 ` [net-next,8/8] cadence: macb: introduce xmit support Jakub Kicinski
2026-02-02 16:31 ` [PATCH net-next 0/8] net: macb: Add XDP support and page pool integration Théo Lebrun
2026-02-13 16:57 ` [PATCH 1/6] net: macb: rename release_buff() -> macb_tx_release_buff() Théo Lebrun
2026-02-13 16:57 ` [PATCH 2/6] net: macb: drop two labels in gem_rx() Théo Lebrun
2026-02-13 16:57 ` [PATCH 3/6] net: macb: always use DMA_BIDIRECTIONAL on page pool buffers Théo Lebrun
2026-02-13 16:57 ` [PATCH 4/6] net: macb: account for stats in Rx XDP codepaths Théo Lebrun
2026-02-13 16:57 ` [PATCH 5/6] net: macb: improve Rx refill error message Théo Lebrun
2026-02-13 16:57 ` [PATCH 6/6] net: macb: rework macb_tx_complete() processing loop Théo Lebrun
2026-02-13 16:57 ` [PATCH net-next 0/8] net: macb: Add XDP support and page pool integration Théo Lebrun
2026-02-13 17:02 ` Théo Lebrun
2026-02-14 15:37 ` Paolo Valerio
2026-02-16 9:17 ` Théo Lebrun
2026-02-19 18:05 ` Paolo Valerio
2026-02-20 15:58 ` Théo Lebrun
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260115222531.313002-9-pvalerio@redhat.com \
--to=pvalerio@redhat.com \
--cc=andrew+netdev@lunn.ch \
--cc=claudiu.beznea@tuxon.dev \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=kuba@kernel.org \
--cc=lorenzo@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=nicolas.ferre@microchip.com \
--cc=pabeni@redhat.com \
--cc=theo.lebrun@bootlin.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox