From: Jason Xing <kerneljasonxing@gmail.com>
To: davem@davemloft.net, edumazet@google.com, kuba@kernel.org,
pabeni@redhat.com, bjorn@kernel.org, magnus.karlsson@intel.com,
maciej.fijalkowski@intel.com, jonathan.lemon@gmail.com,
sdf@fomichev.me, ast@kernel.org, daniel@iogearbox.net,
hawk@kernel.org, john.fastabend@gmail.com
Cc: bpf@vger.kernel.org, netdev@vger.kernel.org,
Jason Xing <kernelxing@tencent.com>
Subject: [PATCH net-next v2 3/3] xsk: remove spin lock protection of cached_prod
Date: Tue, 25 Nov 2025 16:54:31 +0800 [thread overview]
Message-ID: <20251125085431.4039-4-kerneljasonxing@gmail.com> (raw)
In-Reply-To: <20251125085431.4039-1-kerneljasonxing@gmail.com>
From: Jason Xing <kernelxing@tencent.com>
Remove the spin lock protection and adjust the related functions
accordingly. cached_prod is now fully converted to an atomic, which
improves performance by around 5% across different platforms.
Signed-off-by: Jason Xing <kernelxing@tencent.com>
---
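(Not part of the patch, just for reference: a minimal user-space sketch,
built with C11 atomics, of the lock-free reserve/cancel scheme this series
applies to the completion queue's cached_prod. The kernel code uses
atomic_t and the xskq_*/xsk_cq_* helpers introduced in patches 1-2, so the
names, types and ring-size check below are illustrative assumptions only.)

/* Standalone sketch of speculative reserve/cancel on a producer cursor
 * without a spin lock.  Compile with: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 256			/* power of two, like the real CQ */

struct cq_sketch {
	atomic_uint cached_prod;	/* shared producer cursor */
	unsigned int cached_cons;	/* consumer cursor, updated elsewhere */
};

/* Reserve one completion slot.  Mirrors the idea behind
 * xsk_cq_cached_prod_reserve(): bump the cursor first, then back the
 * increment out if the ring turned out to be full.
 */
static int cq_reserve(struct cq_sketch *q)
{
	unsigned int prod = atomic_fetch_add(&q->cached_prod, 1);

	if (prod - q->cached_cons < RING_SIZE)
		return 0;

	atomic_fetch_sub(&q->cached_prod, 1);	/* ring full: undo */
	return -1;
}

/* Give back n reserved slots.  Mirrors xsk_cq_cached_prod_cancel(). */
static void cq_cancel(struct cq_sketch *q, unsigned int n)
{
	atomic_fetch_sub(&q->cached_prod, n);
}

int main(void)
{
	struct cq_sketch q = { .cached_cons = 0 };

	atomic_init(&q.cached_prod, 0);
	if (!cq_reserve(&q))
		cq_cancel(&q, 1);	/* e.g. skb build failed, let app retry */
	printf("cached_prod=%u\n", atomic_load(&q.cached_prod));
	return 0;
}

The design point is the same as in the generic xmit path: reserve
speculatively with a single atomic add and back it out with an atomic sub
on failure, so no per-pool spin lock is needed on the producer side.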
include/net/xsk_buff_pool.h | 5 -----
net/xdp/xsk.c | 21 ++++-----------------
net/xdp/xsk_buff_pool.c | 1 -
3 files changed, 4 insertions(+), 23 deletions(-)
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 92a2358c6ce3..0b1abdb99c9e 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -90,11 +90,6 @@ struct xsk_buff_pool {
* destructor callback.
*/
spinlock_t cq_prod_lock;
- /* Mutual exclusion of the completion ring in the SKB mode.
- * Protect: when sockets share a single cq when the same netdev
- * and queue id is shared.
- */
- spinlock_t cq_cached_prod_lock;
struct xdp_buff_xsk *free_heads[];
};
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index b63409b1422e..ae8a92c168b8 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -546,17 +546,6 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}
-static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
-{
- int ret;
-
- spin_lock(&pool->cq_cached_prod_lock);
- ret = xsk_cq_cached_prod_reserve(pool->cq);
- spin_unlock(&pool->cq_cached_prod_lock);
-
- return ret;
-}
-
static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
struct sk_buff *skb)
{
@@ -585,11 +574,9 @@ static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
spin_unlock_irqrestore(&pool->cq_prod_lock, flags);
}
-static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
+static void xsk_cq_cached_prod_cancel(struct xsk_buff_pool *pool, u32 n)
{
- spin_lock(&pool->cq_cached_prod_lock);
atomic_sub(n, &pool->cq->cached_prod_atomic);
- spin_unlock(&pool->cq_cached_prod_lock);
}
static void xsk_inc_num_desc(struct sk_buff *skb)
@@ -643,7 +630,7 @@ static void xsk_consume_skb(struct sk_buff *skb)
}
skb->destructor = sock_wfree;
- xsk_cq_cancel_locked(xs->pool, num_descs);
+ xsk_cq_cached_prod_cancel(xs->pool, num_descs);
/* Free skb without triggering the perf drop trace */
consume_skb(skb);
xs->skb = NULL;
@@ -860,7 +847,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
xskq_cons_release(xs->tx);
} else {
/* Let application retry */
- xsk_cq_cancel_locked(xs->pool, 1);
+ xsk_cq_cached_prod_cancel(xs->pool, 1);
}
return ERR_PTR(err);
@@ -898,7 +885,7 @@ static int __xsk_generic_xmit(struct sock *sk)
* if there is space in it. This avoids having to implement
* any buffering in the Tx path.
*/
- err = xsk_cq_reserve_locked(xs->pool);
+ err = xsk_cq_cached_prod_reserve(xs->pool->cq);
if (err) {
err = -EAGAIN;
goto out;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 51526034c42a..9539f121b290 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -91,7 +91,6 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
INIT_LIST_HEAD(&pool->xsk_tx_list);
spin_lock_init(&pool->xsk_tx_list_lock);
spin_lock_init(&pool->cq_prod_lock);
- spin_lock_init(&pool->cq_cached_prod_lock);
refcount_set(&pool->users, 1);
pool->fq = xs->fq_tmp;
--
2.41.3
Thread overview: 10+ messages
2025-11-25 8:54 [PATCH net-next v2 0/3] xsk: introduce atomic for cq in generic path Jason Xing
2025-11-25 8:54 ` [PATCH net-next v2 1/3] xsk: add atomic cached_prod for copy mode Jason Xing
2025-11-25 8:54 ` [PATCH net-next v2 2/3] xsk: use atomic operations around " Jason Xing
2025-11-27 11:35 ` Paolo Abeni
2025-11-27 13:55 ` Jason Xing
2025-11-27 15:32 ` Paolo Abeni
2025-11-27 23:48 ` Jason Xing
2025-11-25 8:54 ` Jason Xing [this message]
2025-11-27 11:29 ` [PATCH net-next v2 3/3] xsk: remove spin lock protection of cached_prod Paolo Abeni
2025-11-27 13:18 ` Jason Xing