* [PATCH bpf v1] bpf: Drop rqspinlock usage in ringbuf
@ 2025-08-21 16:26 Kumar Kartikeya Dwivedi
From: Kumar Kartikeya Dwivedi @ 2025-08-21 16:26 UTC (permalink / raw)
  To: bpf
  Cc: Josef Bacik, stable, Alexei Starovoitov, Andrii Nakryiko,
	Daniel Borkmann, Martin KaFai Lau, Eduard Zingerman, kkd,
	kernel-team

We noticed potential lockups and delays in progs running in NMI context
with the rqspinlock changes, which suggests more improvements need to be
made before we can safely support ring buffer updates in such a context.
Revert the change for now.

Reported-by: Josef Bacik <josef@toxicpanda.com>
Cc: stable@vger.kernel.org
Fixes: a650d38915c1 ("bpf: Convert ringbuf map to rqspinlock")
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 kernel/bpf/ringbuf.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)
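
For context, one way progs end up reserving ringbuf space in NMI context
is a perf_event program driven by a hardware PMU event. A minimal sketch
of such a program follows; the map name, sizes, and event fields are
illustrative only and not part of this patch:

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>

  /* Ring buffer shared with user space; size must be a power-of-2
   * multiple of the page size.
   */
  struct {
  	__uint(type, BPF_MAP_TYPE_RINGBUF);
  	__uint(max_entries, 256 * 1024);
  } events SEC(".maps");

  struct event {
  	__u64 ts;
  	__u32 cpu;
  };

  /* Attached to a hardware PMU event (e.g. cycles), this handler runs
   * in NMI context; bpf_ringbuf_reserve() below ends up in
   * __bpf_ringbuf_reserve() and takes the producer spinlock.
   */
  SEC("perf_event")
  int on_pmu_sample(void *ctx)
  {
  	struct event *e;

  	e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
  	if (!e)
  		return 0; /* reservation may fail; drop the sample */

  	e->ts = bpf_ktime_get_ns();
  	e->cpu = bpf_get_smp_processor_id();
  	bpf_ringbuf_submit(e, 0);
  	return 0;
  }

  char LICENSE[] SEC("license") = "GPL";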

diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 719d73299397..1499d8caa9a3 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -11,7 +11,6 @@
 #include <linux/kmemleak.h>
 #include <uapi/linux/btf.h>
 #include <linux/btf_ids.h>
-#include <asm/rqspinlock.h>

 #define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)

@@ -30,7 +29,7 @@ struct bpf_ringbuf {
 	u64 mask;
 	struct page **pages;
 	int nr_pages;
-	rqspinlock_t spinlock ____cacheline_aligned_in_smp;
+	raw_spinlock_t spinlock ____cacheline_aligned_in_smp;
 	/* For user-space producer ring buffers, an atomic_t busy bit is used
 	 * to synchronize access to the ring buffers in the kernel, rather than
 	 * the spinlock that is used for kernel-producer ring buffers. This is
@@ -174,7 +173,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
 	if (!rb)
 		return NULL;

-	raw_res_spin_lock_init(&rb->spinlock);
+	raw_spin_lock_init(&rb->spinlock);
 	atomic_set(&rb->busy, 0);
 	init_waitqueue_head(&rb->waitq);
 	init_irq_work(&rb->work, bpf_ringbuf_notify);
@@ -417,8 +416,12 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)

 	cons_pos = smp_load_acquire(&rb->consumer_pos);

-	if (raw_res_spin_lock_irqsave(&rb->spinlock, flags))
-		return NULL;
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&rb->spinlock, flags))
+			return NULL;
+	} else {
+		raw_spin_lock_irqsave(&rb->spinlock, flags);
+	}

 	pend_pos = rb->pending_pos;
 	prod_pos = rb->producer_pos;
@@ -443,7 +446,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	 */
 	if (new_prod_pos - cons_pos > rb->mask ||
 	    new_prod_pos - pend_pos > rb->mask) {
-		raw_res_spin_unlock_irqrestore(&rb->spinlock, flags);
+		raw_spin_unlock_irqrestore(&rb->spinlock, flags);
 		return NULL;
 	}

@@ -455,7 +458,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	/* pairs with consumer's smp_load_acquire() */
 	smp_store_release(&rb->producer_pos, new_prod_pos);

-	raw_res_spin_unlock_irqrestore(&rb->spinlock, flags);
+	raw_spin_unlock_irqrestore(&rb->spinlock, flags);

 	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
 }
--
2.50.0



* Re: [PATCH bpf v1] bpf: Drop rqspinlock usage in ringbuf
@ 2025-08-21 19:35 ` Andrii Nakryiko
From: Andrii Nakryiko @ 2025-08-21 19:35 UTC (permalink / raw)
  To: Kumar Kartikeya Dwivedi
  Cc: bpf, Josef Bacik, stable, Alexei Starovoitov, Andrii Nakryiko,
	Daniel Borkmann, Martin KaFai Lau, Eduard Zingerman, kkd,
	kernel-team

On Thu, Aug 21, 2025 at 9:26 AM Kumar Kartikeya Dwivedi
<memxor@gmail.com> wrote:
>
> We noticed potential lockups and delays in progs running in NMI context
> with the rqspinlock changes, which suggests more improvements need to be
> made before we can safely support ring buffer updates in such a context.
> Revert the change for now.
>
> Reported-by: Josef Bacik <josef@toxicpanda.com>
> Cc: stable@vger.kernel.org
> Fixes: a650d38915c1 ("bpf: Convert ringbuf map to rqspinlock")
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> ---
>  kernel/bpf/ringbuf.c | 17 ++++++++++-------
>  1 file changed, 10 insertions(+), 7 deletions(-)
>

The patch bot seems to be ignoring this patch; it was applied to the bpf tree, thanks!


