netdev.vger.kernel.org archive mirror
From: Jason Xing <kerneljasonxing@gmail.com>
To: davem@davemloft.net, edumazet@google.com, kuba@kernel.org,
	pabeni@redhat.com, bjorn@kernel.org, magnus.karlsson@intel.com,
	maciej.fijalkowski@intel.com, jonathan.lemon@gmail.com,
	sdf@fomichev.me, ast@kernel.org, daniel@iogearbox.net,
	hawk@kernel.org, john.fastabend@gmail.com, horms@kernel.org,
	andrew+netdev@lunn.ch
Cc: bpf@vger.kernel.org, netdev@vger.kernel.org,
	Jason Xing <kernelxing@tencent.com>
Subject: [PATCH net-next v2 5/9] xsk: add xsk_alloc_batch_skb() to build skbs in batch
Date: Mon, 25 Aug 2025 21:53:38 +0800
Message-ID: <20250825135342.53110-6-kerneljasonxing@gmail.com> (raw)
In-Reply-To: <20250825135342.53110-1-kerneljasonxing@gmail.com>

From: Jason Xing <kernelxing@tencent.com>

Support allocating and building skbs in batch.

This patch uses kmem_cache_alloc_bulk() to do the batch allocation, which
relies on the global common cache 'net_hotdata.skbuff_cache'. A standalone
xsk skb cache (namely, xs->skb_cache) is used to store the skbs instead of
resorting to napi_alloc_cache, which was designed for softirq context.
When a memory shortage occurs, to avoid repeatedly allocating skbs and then
freeing part of them, the allocated skbs are consumed from the cache in
reverse order (i.e. 10, 9, ..., 2, 1, 0), so the unused entries stay cached
for the next call.
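
A minimal sketch of the reverse-order consumption (simplified from the
code added below; 'skbs' is xs->skb_cache and 'skb_count' the new field):

	/* Take the i-th skb of this batch from the tail of the cache so
	 * that, when fewer packets than expected are built, the untouched
	 * entries at the front remain cached for the next call.
	 */
	skb = skbs[skb_count - 1 - i];
	...
	skb_count -= i;	/* only the consumed tail is gone */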

After allocating memory for each skb, in a 'for' loop, the patch borrows
part of __alloc_skb() to initialize the skb and then calls xsk_build_skb()
to complete the rest of the process, such as copying the data.

Since no fclone flag is passed at allocation time, on the freeing side
napi_consume_skb() in the tx completion path puts the skb back into the
separate global cache 'net_hotdata.skbuff_cache', which implements deferred
skb freeing to avoid freeing skbs one by one.
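
For reference, the completing driver side typically looks like the sketch
below (not part of this patch; 'budget' is the driver's NAPI budget):

	/* In the driver's tx completion path; a non-zero budget lets
	 * napi_consume_skb() batch the frees back into
	 * net_hotdata.skbuff_cache instead of freeing skbs one by one.
	 */
	napi_consume_skb(skb, budget);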

Signed-off-by: Jason Xing <kernelxing@tencent.com>
---
 include/net/xdp_sock.h |   3 ++
 net/core/skbuff.c      | 103 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 106 insertions(+)

diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index cbba880c27c3..b533317409df 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -92,6 +92,7 @@ struct xdp_sock {
 	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
 	struct sk_buff **skb_cache;
 	struct xdp_desc *desc_batch;
+	unsigned int skb_count;
 };
 
 /*
@@ -127,6 +128,8 @@ struct xsk_tx_metadata_ops {
 struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
 			      struct sk_buff *allocated_skb,
 			      struct xdp_desc *desc);
+int xsk_alloc_batch_skb(struct xdp_sock *xs, u32 nb_pkts, u32 nb_descs,
+			int *consumed, int *start, int *end);
 #ifdef CONFIG_XDP_SOCKETS
 
 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ee0274417948..c9071e56d133 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -80,6 +80,8 @@
 #include <net/mctp.h>
 #include <net/page_pool/helpers.h>
 #include <net/dropreason.h>
+#include <net/xdp_sock.h>
+#include <net/xsk_buff_pool.h>
 
 #include <linux/uaccess.h>
 #include <trace/events/skb.h>
@@ -614,6 +616,107 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
 	return obj;
 }
 
+int xsk_alloc_batch_skb(struct xdp_sock *xs, u32 nb_pkts, u32 nb_descs,
+			int *consumed, int *start, int *end)
+{
+	struct xdp_desc *descs = xs->desc_batch;
+	struct sk_buff **skbs = xs->skb_cache;
+	gfp_t gfp_mask = xs->sk.sk_allocation;
+	struct net_device *dev = xs->dev;
+	int node = NUMA_NO_NODE;
+	struct sk_buff *skb;
+	u32 i = 0, j = 0;
+	bool pfmemalloc;
+	u32 base_len;
+	int err = 0;
+	u8 *data;
+
+	base_len = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
+	if (!(dev->priv_flags & IFF_TX_SKB_NO_LINEAR))
+		base_len += dev->needed_tailroom;
+
+	if (xs->skb_count >= nb_pkts)
+		goto build;
+
+	if (xs->skb) {
+		i = 1;
+		xs->skb_count++;
+	}
+
+	xs->skb_count += kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
+					       gfp_mask, nb_pkts - xs->skb_count,
+					       (void **)&skbs[xs->skb_count]);
+	if (xs->skb_count < nb_pkts)
+		nb_pkts = xs->skb_count;
+
+build:
+	for (i = 0, j = 0; j < nb_descs; j++) {
+		if (!xs->skb) {
+			u32 size = base_len + descs[j].len;
+
+			/* In case we don't have enough allocated skbs */
+			if (i >= nb_pkts) {
+				err = -EAGAIN;
+				break;
+			}
+
+			if (sk_wmem_alloc_get(&xs->sk) > READ_ONCE(xs->sk.sk_sndbuf)) {
+				err = -EAGAIN;
+				break;
+			}
+
+			skb = skbs[xs->skb_count - 1 - i];
+
+			prefetchw(skb);
+			/* We do our best to align skb_shared_info on a separate cache
+			 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
+			 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
+			 * Both skb->head and skb_shared_info are cache line aligned.
+			 */
+			data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
+			if (unlikely(!data)) {
+				err = -ENOBUFS;
+				break;
+			}
+			/* kmalloc_size_roundup() might give us more room than requested.
+			 * Put skb_shared_info exactly at the end of allocated zone,
+			 * to allow max possible filling before reallocation.
+			 */
+			prefetchw(data + SKB_WITH_OVERHEAD(size));
+
+			memset(skb, 0, offsetof(struct sk_buff, tail));
+			__build_skb_around(skb, data, size);
+			skb->pfmemalloc = pfmemalloc;
+			skb_set_owner_w(skb, &xs->sk);
+		} else if (unlikely(i == 0)) {
+			/* We have a skb in cache that is left last time */
+			kmem_cache_free(net_hotdata.skbuff_cache, skbs[xs->skb_count - 1]);
+			skbs[xs->skb_count - 1] = xs->skb;
+		}
+
+		skb = xsk_build_skb(xs, skb, &descs[j]);
+		if (IS_ERR(skb)) {
+			err = PTR_ERR(skb);
+			break;
+		}
+
+		if (xp_mb_desc(&descs[j])) {
+			xs->skb = skb;
+			continue;
+		}
+
+		xs->skb = NULL;
+		i++;
+	}
+
+	*consumed = j;
+	*start = xs->skb_count - 1;
+	*end = xs->skb_count - i;
+	xs->skb_count -= i;
+
+	return err;
+}
+
 /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
  *	'private' fields and also do memory statistics to find all the
  *	[BEEP] leaks.
-- 
2.41.3


Thread overview: 31+ messages
2025-08-25 13:53 [PATCH net-next v2 0/9] xsk: improvement performance in copy mode Jason Xing
2025-08-25 13:53 ` [PATCH net-next v2 1/9] xsk: introduce XDP_GENERIC_XMIT_BATCH setsockopt Jason Xing
2025-08-25 13:53 ` [PATCH net-next v2 2/9] xsk: add descs parameter in xskq_cons_read_desc_batch() Jason Xing
2025-08-25 21:18   ` Maciej Fijalkowski
2025-08-26  0:10     ` Jason Xing
2025-08-25 13:53 ` [PATCH net-next v2 3/9] xsk: introduce locked version of xskq_prod_write_addr_batch Jason Xing
2025-08-25 21:42   ` Maciej Fijalkowski
2025-08-26  0:13     ` Jason Xing
2025-08-25 13:53 ` [PATCH net-next v2 4/9] xsk: extend xsk_build_skb() to support passing an already allocated skb Jason Xing
2025-08-25 21:49   ` Maciej Fijalkowski
2025-08-26  0:26     ` Jason Xing
2025-08-25 13:53 ` Jason Xing [this message]
2025-08-25 16:56   ` [PATCH net-next v2 5/9] xsk: add xsk_alloc_batch_skb() to build skbs in batch kernel test robot
2025-08-27 14:32   ` Alexander Lobakin
2025-08-28  0:38     ` Jason Xing
2025-08-28 15:28       ` Alexander Lobakin
2025-08-29  0:31         ` Jason Xing
2025-08-25 13:53 ` [PATCH net-next v2 6/9] xsk: add direct xmit in batch function Jason Xing
2025-08-25 17:34   ` Stanislav Fomichev
2025-08-26  0:27     ` Jason Xing
2025-08-25 13:53 ` [PATCH net-next v2 7/9] xsk: support batch xmit main logic Jason Xing
2025-08-25 13:53 ` [PATCH net-next v2 8/9] xsk: support generic batch xmit in copy mode Jason Xing
2025-08-25 13:53 ` [PATCH net-next v2 9/9] xsk: support dynamic xmit.more control for batch xmit Jason Xing
2025-08-25 17:44 ` [PATCH net-next v2 0/9] xsk: improvement performance in copy mode Jakub Kicinski
2025-08-26  0:01   ` Jason Xing
2025-08-26  0:29     ` Jakub Kicinski
2025-08-26  0:51       ` Jason Xing
2025-08-26  1:15         ` Jakub Kicinski
2025-08-26  1:49           ` Jason Xing
2025-08-25 21:15 ` Maciej Fijalkowski
2025-08-26  0:06   ` Jason Xing
