From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
To: netdev@vger.kernel.org
Cc: bpf@vger.kernel.org, magnus.karlsson@intel.com,
stfomichev@gmail.com, kuba@kernel.org, pabeni@redhat.com,
horms@kernel.org, bjorn@kernel.org, lorenzo@kernel.org,
hawk@kernel.org, toke@redhat.com,
Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Subject: [PATCH RFC net-next 1/4] xdp: add mixed page_pool/page_shared memory type
Date: Sat, 9 May 2026 10:48:55 +0200 [thread overview]
Message-ID: <20260509084858.773921-2-maciej.fijalkowski@intel.com> (raw)
In-Reply-To: <20260509084858.773921-1-maciej.fijalkowski@intel.com>
Generic XDP runs on skb-backed data. In that mode the skb head remains
owned by the skb, but XDP helpers may still release frags, for example
when a program trims a non-linear packet.
With the generic page_pool CoW path, the frags visible to XDP may be
backed by the generic system page_pool. In the fallback path, or for
other skb-backed memory, the same generic XDP rxq may still describe
page-frag based memory. Selecting MEM_TYPE_PAGE_POOL or
MEM_TYPE_PAGE_SHARED purely from the rxq therefore either lies about
page_pool ownership or misses recycling opportunities.
Add MEM_TYPE_PAGE_POOL_OR_SHARED for skb-backed generic XDP users. The
return path inspects the actual netmem: page_pool-backed netmems are
returned through their page_pool, and everything else falls back to
page_frag_free(). Transition netdev_rx_queue's xdp_rxq_info from
MEM_TYPE_PAGE_SHARED to MEM_TYPE_PAGE_POOL_OR_SHARED.
This keeps the rxq identity stable for users that inspect xdp->rxq->dev and
xdp->rxq->queue_index, while avoiding per-packet rxq->mem mutation.
Respect the new mem_type in __xdp_build_skb_from_frame(), as veth could
redirect an xdp_frame onto a cpumap.
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
---
include/net/xdp.h | 1 +
net/core/dev.c | 7 ++++++
net/core/xdp.c | 54 ++++++++++++++++++++++++++++++++++++++++++-----
3 files changed, 57 insertions(+), 5 deletions(-)
diff --git a/include/net/xdp.h b/include/net/xdp.h
index aa742f413c35..d60b8857e4eb 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -45,6 +45,7 @@ enum xdp_mem_type {
MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */
MEM_TYPE_PAGE_POOL,
MEM_TYPE_XSK_BUFF_POOL,
+ MEM_TYPE_PAGE_POOL_OR_SHARED,
MEM_TYPE_MAX,
};
diff --git a/net/core/dev.c b/net/core/dev.c
index e59f6025067c..6cc2a5bed20f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -11207,6 +11207,13 @@ static int netif_alloc_rx_queues(struct net_device *dev)
err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
if (err < 0)
goto err_rxq_info;
+ err = xdp_rxq_info_reg_mem_model(&rx[i].xdp_rxq,
+ MEM_TYPE_PAGE_POOL_OR_SHARED,
+ NULL);
+ if (err < 0) {
+ xdp_rxq_info_unreg(&rx[i].xdp_rxq);
+ goto err_rxq_info;
+ }
}
return 0;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 9890a30584ba..c57a82620520 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -22,6 +22,7 @@
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>
+#include "netmem_priv.h"
#define REG_STATE_NEW 0x0
#define REG_STATE_REGISTERED 0x1
@@ -280,6 +281,12 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
if (!__is_supported_mem_type(type))
return ERR_PTR(-EOPNOTSUPP);
+ /* MEM_TYPE_PAGE_POOL_OR_SHARED is expected to handle pp's allocator
+ * separately.
+ */
+ if (type == MEM_TYPE_PAGE_POOL_OR_SHARED && allocator)
+ return ERR_PTR(-EINVAL);
+
mem->type = type;
if (!allocator) {
@@ -424,6 +431,23 @@ void xdp_rxq_info_attach_page_pool(struct xdp_rxq_info *xdp_rxq,
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_attach_page_pool);
+static bool xdp_netmem_is_pp(netmem_ref netmem)
+{
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ return netmem_is_pp(netmem);
+#else
+ return false;
+#endif
+}
+
+static void __xdp_return_page_pool(netmem_ref netmem, bool napi_direct)
+{
+ if (napi_direct && xdp_return_frame_no_direct())
+ napi_direct = false;
+
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, napi_direct);
+}
+
/* XDP RX runs under NAPI protection, and in different delivery error
* scenarios (e.g. queue full), it is possible to return the xdp_frame
* while still leveraging this protection. The @napi_direct boolean
@@ -433,20 +457,26 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_attach_page_pool);
void __xdp_return(netmem_ref netmem, enum xdp_mem_type mem_type,
bool napi_direct, struct xdp_buff *xdp)
{
+ netmem_ref head;
+
switch (mem_type) {
case MEM_TYPE_PAGE_POOL:
netmem = netmem_compound_head(netmem);
- if (napi_direct && xdp_return_frame_no_direct())
- napi_direct = false;
/* No need to check netmem_is_pp() as mem->type knows this a
* page_pool page
*/
- page_pool_put_full_netmem(netmem_get_pp(netmem), netmem,
- napi_direct);
+ __xdp_return_page_pool(netmem, napi_direct);
break;
case MEM_TYPE_PAGE_SHARED:
page_frag_free(__netmem_address(netmem));
break;
+ case MEM_TYPE_PAGE_POOL_OR_SHARED:
+ head = netmem_compound_head(netmem);
+ if (xdp_netmem_is_pp(head))
+ __xdp_return_page_pool(head, napi_direct);
+ else
+ page_frag_free(__netmem_address(netmem));
+ break;
case MEM_TYPE_PAGE_ORDER0:
put_page(__netmem_to_page(netmem));
break;
@@ -791,6 +821,19 @@ struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp)
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_zc);
+static bool xdp_mem_is_page_pool_backed(enum xdp_mem_type mem_type,
+ netmem_ref netmem)
+{
+ switch (mem_type) {
+ case MEM_TYPE_PAGE_POOL:
+ return true;
+ case MEM_TYPE_PAGE_POOL_OR_SHARED:
+ return xdp_netmem_is_pp(netmem_compound_head(netmem));
+ default:
+ return false;
+ }
+}
+
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct sk_buff *skb,
struct net_device *dev)
@@ -836,7 +879,8 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
* - RX ring dev queue index (skb_record_rx_queue)
*/
- if (xdpf->mem_type == MEM_TYPE_PAGE_POOL)
+ if (xdp_mem_is_page_pool_backed(xdpf->mem_type,
+ virt_to_netmem(xdpf->data)))
skb_mark_for_recycle(skb);
/* Allow SKB to reuse area used by xdp_frame */
--
2.43.0
next prev parent reply other threads:[~2026-05-09 8:49 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-09 8:48 [PATCH RFC net-next 0/4] xdp: reuse generic skb XDP handling for veth Maciej Fijalkowski
2026-05-09 8:48 ` Maciej Fijalkowski [this message]
2026-05-09 8:48 ` [PATCH RFC net-next 2/4] xdp: return status from generic_xdp_tx() Maciej Fijalkowski
2026-05-12 12:57 ` Björn Töpel
2026-05-12 17:13 ` Maciej Fijalkowski
2026-05-09 8:48 ` [PATCH RFC net-next 3/4] xdp: split generic XDP skb handling Maciej Fijalkowski
2026-05-09 8:48 ` [PATCH RFC net-next 4/4] veth: use generic skb XDP handling Maciej Fijalkowski
2026-05-12 14:32 ` Björn Töpel
2026-05-12 17:06 ` Maciej Fijalkowski
2026-05-13 11:31 ` Björn Töpel
2026-05-12 12:55 ` [PATCH RFC net-next 0/4] xdp: reuse generic skb XDP handling for veth Björn Töpel
2026-05-12 17:12 ` Maciej Fijalkowski
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260509084858.773921-2-maciej.fijalkowski@intel.com \
--to=maciej.fijalkowski@intel.com \
--cc=bjorn@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=hawk@kernel.org \
--cc=horms@kernel.org \
--cc=kuba@kernel.org \
--cc=lorenzo@kernel.org \
--cc=magnus.karlsson@intel.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=stfomichev@gmail.com \
--cc=toke@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox