From: Paolo Valerio <pvalerio@redhat.com>
To: netdev@vger.kernel.org
Cc: "Nicolas Ferre" <nicolas.ferre@microchip.com>,
"Claudiu Beznea" <claudiu.beznea@tuxon.dev>,
"Andrew Lunn" <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
"Jakub Kicinski" <kuba@kernel.org>,
"Paolo Abeni" <pabeni@redhat.com>,
"Lorenzo Bianconi" <lorenzo@kernel.org>,
"Théo Lebrun" <theo.lebrun@bootlin.com>,
"Nicolai Buchwitz" <nb@tipi-net.de>
Subject: [PATCH net-next v6 7/7] net: macb: introduce ndo_xdp_xmit support
Date: Mon, 23 Mar 2026 23:10:47 +0100 [thread overview]
Message-ID: <20260323221047.2749577-8-pvalerio@redhat.com> (raw)
In-Reply-To: <20260323221047.2749577-1-pvalerio@redhat.com>
Introduce the ndo_xdp_xmit function for redirection,
update macb_xdp_submit_frame() accordingly, and advertise
the NETDEV_XDP_ACT_NDO_XMIT capability.
Signed-off-by: Paolo Valerio <pvalerio@redhat.com>
Tested-by: Nicolai Buchwitz <nb@tipi-net.de>
---
drivers/net/ethernet/cadence/macb_main.c | 73 +++++++++++++++++++-----
1 file changed, 58 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 537d02264851..38b15d1e7127 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1202,11 +1202,13 @@ static void macb_tx_release_buff(void *buff, enum macb_tx_buff_type type, int bu
{
if (type == MACB_TYPE_SKB) {
napi_consume_skb(buff, budget);
- } else {
+ } else if (type == MACB_TYPE_XDP_TX) {
if (!budget)
xdp_return_frame(buff);
else
xdp_return_frame_rx_napi(buff);
+ } else {
+ xdp_return_frame(buff);
}
}
@@ -1664,20 +1666,24 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
}
static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
- struct net_device *dev, dma_addr_t addr)
+ struct net_device *dev, bool dma_map,
+ dma_addr_t addr)
{
+ enum macb_tx_buff_type buff_type;
struct macb_tx_buff *tx_buff;
int cpu = smp_processor_id();
struct macb_dma_desc *desc;
struct macb_queue *queue;
unsigned int next_head;
unsigned long flags;
+ dma_addr_t mapping;
u16 queue_index;
int err = 0;
u32 ctrl;
queue_index = cpu % bp->num_queues;
queue = &bp->queues[queue_index];
+ buff_type = dma_map ? MACB_TYPE_XDP_NDO : MACB_TYPE_XDP_TX;
spin_lock_irqsave(&queue->tx_ptr_lock, flags);
@@ -1690,14 +1696,23 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
goto unlock;
}
- /* progs can adjust the head. Sync and set the adjusted one.
- * This also implicitly takes into account ip alignment,
- * if present.
- */
- addr += xdpf->headroom + sizeof(*xdpf);
-
- dma_sync_single_for_device(&bp->pdev->dev, addr,
- xdpf->len, DMA_BIDIRECTIONAL);
+ if (dma_map) {
+ mapping = dma_map_single(&bp->pdev->dev,
+ xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+ } else {
+ /* progs can adjust the head. Sync and set the adjusted one.
+ * This also implicitly takes into account ip alignment,
+ * if present.
+ */
+ mapping = addr + xdpf->headroom + sizeof(*xdpf);
+ dma_sync_single_for_device(&bp->pdev->dev, mapping,
+ xdpf->len, DMA_BIDIRECTIONAL);
+ }
next_head = queue->tx_head + 1;
@@ -1708,8 +1723,8 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
desc = macb_tx_desc(queue, queue->tx_head);
tx_buff = macb_tx_buff(queue, queue->tx_head);
tx_buff->ptr = xdpf;
- tx_buff->type = MACB_TYPE_XDP_TX;
- tx_buff->mapping = 0;
+ tx_buff->type = buff_type;
+ tx_buff->mapping = dma_map ? mapping : 0;
tx_buff->size = xdpf->len;
tx_buff->mapped_as_page = false;
@@ -1720,7 +1735,7 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
ctrl |= MACB_BIT(TX_WRAP);
/* Set TX buffer descriptor */
- macb_set_addr(bp, desc, addr);
+ macb_set_addr(bp, desc, mapping);
/* desc->addr must be visible to hardware before clearing
* 'TX_USED' bit in desc->ctrl.
*/
@@ -1745,6 +1760,32 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
return err;
}
+static int gem_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct macb *bp = netdev_priv(dev);
+ u32 xmitted = 0;
+ int i;
+
+ if (!macb_is_gem(bp))
+ return -EOPNOTSUPP;
+
+ if (unlikely(!netif_carrier_ok(dev)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < num_frame; i++) {
+ if (macb_xdp_submit_frame(bp, frames[i], dev, true, 0))
+ break;
+
+ xmitted++;
+ }
+
+ return xmitted;
+}
+
static u32 gem_xdp_run(struct macb_queue *queue, void *buff_head,
unsigned int *len, unsigned int *headroom,
dma_addr_t addr)
@@ -1782,7 +1823,7 @@ static u32 gem_xdp_run(struct macb_queue *queue, void *buff_head,
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(&xdp);
if (unlikely(!xdpf) || macb_xdp_submit_frame(queue->bp, xdpf,
- dev, addr)) {
+ dev, false, addr)) {
act = XDP_DROP;
break;
}
@@ -5043,6 +5084,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_hwtstamp_get = macb_hwtstamp_get,
.ndo_setup_tc = macb_setup_tc,
.ndo_bpf = gem_xdp,
+ .ndo_xdp_xmit = gem_xdp_xmit,
};
/* Configure peripheral capabilities according to device tree
@@ -6333,7 +6375,8 @@ static int macb_probe(struct platform_device *pdev)
bp->rx_ip_align = NET_IP_ALIGN;
dev->xdp_features = NETDEV_XDP_ACT_BASIC |
- NETDEV_XDP_ACT_REDIRECT;
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
}
netif_carrier_off(dev);
--
2.53.0
prev parent reply other threads:[~2026-03-23 22:11 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-23 22:10 [PATCH net-next v6 0/7] net: macb: Add XDP support and page pool integration Paolo Valerio
2026-03-23 22:10 ` [PATCH net-next v6 1/7] net: macb: move Rx buffers alloc from link up to open Paolo Valerio
2026-03-23 22:10 ` [PATCH net-next v6 2/7] net: macb: rename rx_skbuff into rx_buff Paolo Valerio
2026-03-23 22:10 ` [PATCH net-next v6 3/7] net: macb: Add page pool support handle multi-descriptor frame rx Paolo Valerio
2026-03-25 17:45 ` Simon Horman
2026-03-23 22:10 ` [PATCH net-next v6 4/7] net: macb: make macb_tx_skb generic Paolo Valerio
2026-03-23 22:10 ` [PATCH net-next v6 5/7] net: macb: generalize tx buffer handling Paolo Valerio
2026-03-23 22:10 ` [PATCH net-next v6 6/7] net: macb: add XDP support for gem Paolo Valerio
2026-03-24 4:57 ` Mohsin Bashir
2026-03-23 22:10 ` Paolo Valerio [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260323221047.2749577-8-pvalerio@redhat.com \
--to=pvalerio@redhat.com \
--cc=andrew+netdev@lunn.ch \
--cc=claudiu.beznea@tuxon.dev \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=kuba@kernel.org \
--cc=lorenzo@kernel.org \
--cc=nb@tipi-net.de \
--cc=netdev@vger.kernel.org \
--cc=nicolas.ferre@microchip.com \
--cc=pabeni@redhat.com \
--cc=theo.lebrun@bootlin.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox