BPF List
* [PATCH bpf-next v2 1/2] rqspinlock: Annotate rqspinlock lock acquiring functions with __must_check
@ 2025-11-21 20:57 Amery Hung
  2025-11-21 20:57 ` [PATCH bpf-next v2 2/2] rqspinlock: Handle return of raw_res_spin_lock{_irqsave} in locktorture Amery Hung
  2025-11-21 21:27 ` [PATCH bpf-next v2 1/2] rqspinlock: Annotate rqspinlock lock acquiring functions with __must_check bot+bpf-ci
  0 siblings, 2 replies; 4+ messages in thread
From: Amery Hung @ 2025-11-21 20:57 UTC (permalink / raw)
  To: bpf
  Cc: netdev, alexei.starovoitov, andrii, daniel, memxor,
	david.laight.linux, dave, paulmck, josh, ameryhung, kernel-team

Locking a resilient queued spinlock can fail when a deadlock or timeout
occurs. Mark the lock acquiring functions with __must_check to make sure
callers always handle the returned error.
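
For illustration, a minimal caller sketch (hypothetical function, not part
of this patch): once the acquiring functions are __must_check, ignoring the
return value triggers -Wunused-result, so callers are expected to branch on
the result before touching the protected data.

	static int example_update(rqspinlock_t *lock, int *counter)
	{
		unsigned long flags;
		int ret;

		ret = raw_res_spin_lock_irqsave(lock, flags);
		if (ret)	/* -EDEADLK or -ETIMEDOUT: lock was not taken */
			return ret;

		(*counter)++;
		raw_res_spin_unlock_irqrestore(lock, flags);
		return 0;
	}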

Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
 include/asm-generic/rqspinlock.h | 47 +++++++++++++++++++-------------
 1 file changed, 28 insertions(+), 19 deletions(-)

diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
index 6d4244d643df..855c09435506 100644
--- a/include/asm-generic/rqspinlock.h
+++ b/include/asm-generic/rqspinlock.h
@@ -171,7 +171,7 @@ static __always_inline void release_held_lock_entry(void)
  * * -EDEADLK	- Lock acquisition failed because of AA/ABBA deadlock.
  * * -ETIMEDOUT - Lock acquisition failed because of timeout.
  */
-static __always_inline int res_spin_lock(rqspinlock_t *lock)
+static __always_inline __must_check int res_spin_lock(rqspinlock_t *lock)
 {
 	int val = 0;
 
@@ -223,27 +223,36 @@ static __always_inline void res_spin_unlock(rqspinlock_t *lock)
 #define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t){0}; })
 #endif
 
-#define raw_res_spin_lock(lock)                    \
-	({                                         \
-		int __ret;                         \
-		preempt_disable();                 \
-		__ret = res_spin_lock(lock);	   \
-		if (__ret)                         \
-			preempt_enable();          \
-		__ret;                             \
-	})
+static __always_inline __must_check int raw_res_spin_lock(rqspinlock_t *lock)
+{
+	int ret;
+
+	preempt_disable();
+	ret = res_spin_lock(lock);
+	if (ret)
+		preempt_enable();
+
+	return ret;
+}
 
 #define raw_res_spin_unlock(lock) ({ res_spin_unlock(lock); preempt_enable(); })
 
-#define raw_res_spin_lock_irqsave(lock, flags)    \
-	({                                        \
-		int __ret;                        \
-		local_irq_save(flags);            \
-		__ret = raw_res_spin_lock(lock);  \
-		if (__ret)                        \
-			local_irq_restore(flags); \
-		__ret;                            \
-	})
+static __always_inline __must_check int
+__raw_res_spin_lock_irqsave(rqspinlock_t *lock, unsigned long *flags)
+{
+	unsigned long __flags;
+	int ret;
+
+	local_irq_save(__flags);
+	ret = raw_res_spin_lock(lock);
+	if (ret)
+		local_irq_restore(__flags);
+
+	*flags = __flags;
+	return ret;
+}
+
+#define raw_res_spin_lock_irqsave(lock, flags) __raw_res_spin_lock_irqsave(lock, &flags)
 
 #define raw_res_spin_unlock_irqrestore(lock, flags) ({ raw_res_spin_unlock(lock); local_irq_restore(flags); })
 
-- 
2.47.3



* [PATCH bpf-next v2 2/2] rqspinlock: Handle return of raw_res_spin_lock{_irqsave} in locktorture
  2025-11-21 20:57 [PATCH bpf-next v2 1/2] rqspinlock: Annotate rqspinlock lock acquiring functions with __must_check Amery Hung
@ 2025-11-21 20:57 ` Amery Hung
  2025-11-21 21:27 ` [PATCH bpf-next v2 1/2] rqspinlock: Annotate rqspinlock lock acquiring functions with __must_check bot+bpf-ci
  1 sibling, 0 replies; 4+ messages in thread
From: Amery Hung @ 2025-11-21 20:57 UTC (permalink / raw)
  To: bpf
  Cc: netdev, alexei.starovoitov, andrii, daniel, memxor,
	david.laight.linux, dave, paulmck, josh, ameryhung, kernel-team

Return errors from raw_res_spin_lock{_irqsave}() to writelock(). This
simply silences the unused-result warning: lock_torture_writer()
currently does not handle errors returned from writelock(), which aligns
with the existing torture test for ww_mutex.

Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
 kernel/locking/locktorture.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index ce0362f0a871..2b3686b96907 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -369,8 +369,7 @@ static rqspinlock_t rqspinlock;
 
 static int torture_raw_res_spin_write_lock(int tid __maybe_unused)
 {
-	raw_res_spin_lock(&rqspinlock);
-	return 0;
+	return raw_res_spin_lock(&rqspinlock);
 }
 
 static void torture_raw_res_spin_write_unlock(int tid __maybe_unused)
@@ -392,8 +391,12 @@ static struct lock_torture_ops raw_res_spin_lock_ops = {
 static int torture_raw_res_spin_write_lock_irq(int tid __maybe_unused)
 {
 	unsigned long flags;
+	int err;
+
+	err = raw_res_spin_lock_irqsave(&rqspinlock, flags);
+	if (err)
+		return err;
 
-	raw_res_spin_lock_irqsave(&rqspinlock, flags);
 	cxt.cur_ops->flags = flags;
 	return 0;
 }
-- 
2.47.3



* Re: [PATCH bpf-next v2 1/2] rqspinlock: Annotate rqspinlock lock acquiring functions with __must_check
  2025-11-21 20:57 [PATCH bpf-next v2 1/2] rqspinlock: Annotate rqspinlock lock acquiring functions with __must_check Amery Hung
  2025-11-21 20:57 ` [PATCH bpf-next v2 2/2] rqspinlock: Handle return of raw_res_spin_lock{_irqsave} in locktorture Amery Hung
@ 2025-11-21 21:27 ` bot+bpf-ci
  2025-11-21 21:54   ` Amery Hung
  1 sibling, 1 reply; 4+ messages in thread
From: bot+bpf-ci @ 2025-11-21 21:27 UTC (permalink / raw)
  To: ameryhung, bpf
  Cc: netdev, alexei.starovoitov, andrii, daniel, memxor,
	david.laight.linux, dave, paulmck, josh, ameryhung, kernel-team,
	ast, andrii, daniel, martin.lau, eddyz87, yonghong.song, clm,
	ihor.solodrai


> diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
> index 6d4244d64..855c09435 100644
> --- a/include/asm-generic/rqspinlock.h
> +++ b/include/asm-generic/rqspinlock.h

[ ... ]

> @@ -171,7 +171,7 @@ static __always_inline void release_held_lock_entry(void)
>   * * -EDEADLK	- Lock acquisition failed because of AA/ABBA deadlock.
>   * * -ETIMEDOUT - Lock acquisition failed because of timeout.
>   */
> -static __always_inline int res_spin_lock(rqspinlock_t *lock)
> +static __always_inline __must_check int res_spin_lock(rqspinlock_t *lock)
>  {
>  	int val = 0;
>
> @@ -223,27 +223,36 @@ static __always_inline void res_spin_unlock(rqspinlock_t *lock)
>  #define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t){0}; })
>  #endif
>
> -#define raw_res_spin_lock(lock)                    \
> -	({                                         \
> -		int __ret;                         \
> -		preempt_disable();                 \
> -		__ret = res_spin_lock(lock);	   \
> -		if (__ret)                         \
> -			preempt_enable();          \
> -		__ret;                             \
> -	})
> +static __always_inline __must_check int raw_res_spin_lock(rqspinlock_t *lock)
> +{
> +	int ret;
> +
> +	preempt_disable();
> +	ret = res_spin_lock(lock);
> +	if (ret)
> +		preempt_enable();
> +
> +	return ret;
> +}
>
>  #define raw_res_spin_unlock(lock) ({ res_spin_unlock(lock); preempt_enable(); })
>
> -#define raw_res_spin_lock_irqsave(lock, flags)    \
> -	({                                        \
> -		int __ret;                        \
> -		local_irq_save(flags);            \
> -		__ret = raw_res_spin_lock(lock);  \
> -		if (__ret)                        \
> -			local_irq_restore(flags); \
> -		__ret;                            \
> -	})
> +static __always_inline __must_check int
> +__raw_res_spin_lock_irqsave(rqspinlock_t *lock, unsigned long *flags)
> +{
> +	unsigned long __flags;
> +	int ret;
> +
> +	local_irq_save(__flags);
> +	ret = raw_res_spin_lock(lock);
> +	if (ret)
> +		local_irq_restore(__flags);
> +
> +	*flags = __flags;
> +	return ret;
> +}
> +
> +#define raw_res_spin_lock_irqsave(lock, flags) __raw_res_spin_lock_irqsave(lock, &flags)

The __must_check annotations will cause compiler warnings in
kernel/locking/locktorture.c where torture_raw_res_spin_write_lock()
and torture_raw_res_spin_write_lock_irq() ignore lock acquisition
failures.

When raw_res_spin_lock() returns an error (deadlock or timeout), the
torture test functions still return 0 to indicate success. The torture
framework then calls the unlock functions, but the lock was never
acquired. In res_spin_unlock(), this causes an out-of-bounds array
access when computing locks[cnt - 1] with cnt=0, potentially corrupting
per-CPU memory.
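
A toy model of that hazard (standalone sketch with made-up names, not the
real rqspinlock bookkeeping): if the release path pops locks[cnt - 1] from
a held-locks table whose count is zero, the index underflows and adjacent
memory is written.

	struct toy_held_locks {
		int cnt;
		void *locks[4];
	};

	void toy_unlock(struct toy_held_locks *h)
	{
		h->locks[h->cnt - 1] = NULL;	/* cnt == 0: writes locks[-1] */
		h->cnt--;			/* count goes negative */
	}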

Should the torture test handle lock acquisition failures, or use
different lock types that cannot fail?


---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md

CI run summary: https://github.com/kernel-patches/bpf/actions/runs/19583558278


* Re: [PATCH bpf-next v2 1/2] rqspinlock: Annotate rqspinlock lock acquiring functions with __must_check
  2025-11-21 21:27 ` [PATCH bpf-next v2 1/2] rqspinlock: Annotate rqspinlock lock acquiring functions with __must_check bot+bpf-ci
@ 2025-11-21 21:54   ` Amery Hung
  0 siblings, 0 replies; 4+ messages in thread
From: Amery Hung @ 2025-11-21 21:54 UTC (permalink / raw)
  To: bot+bpf-ci
  Cc: bpf, netdev, alexei.starovoitov, andrii, daniel, memxor,
	david.laight.linux, dave, paulmck, josh, kernel-team, ast,
	martin.lau, eddyz87, yonghong.song, clm, ihor.solodrai

On Fri, Nov 21, 2025 at 1:27 PM <bot+bpf-ci@kernel.org> wrote:
>
> > diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
> > index 6d4244d64..855c09435 100644
> > --- a/include/asm-generic/rqspinlock.h
> > +++ b/include/asm-generic/rqspinlock.h
>
> [ ... ]
>
> > @@ -171,7 +171,7 @@ static __always_inline void release_held_lock_entry(void)
> >   * * -EDEADLK        - Lock acquisition failed because of AA/ABBA deadlock.
> >   * * -ETIMEDOUT - Lock acquisition failed because of timeout.
> >   */
> > -static __always_inline int res_spin_lock(rqspinlock_t *lock)
> > +static __always_inline __must_check int res_spin_lock(rqspinlock_t *lock)
> >  {
> >       int val = 0;
> >
> > @@ -223,27 +223,36 @@ static __always_inline void res_spin_unlock(rqspinlock_t *lock)
> >  #define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t){0}; })
> >  #endif
> >
> > -#define raw_res_spin_lock(lock)                    \
> > -     ({                                         \
> > -             int __ret;                         \
> > -             preempt_disable();                 \
> > -             __ret = res_spin_lock(lock);       \
> > -             if (__ret)                         \
> > -                     preempt_enable();          \
> > -             __ret;                             \
> > -     })
> > +static __always_inline __must_check int raw_res_spin_lock(rqspinlock_t *lock)
> > +{
> > +     int ret;
> > +
> > +     preempt_disable();
> > +     ret = res_spin_lock(lock);
> > +     if (ret)
> > +             preempt_enable();
> > +
> > +     return ret;
> > +}
> >
> >  #define raw_res_spin_unlock(lock) ({ res_spin_unlock(lock); preempt_enable(); })
> >
> > -#define raw_res_spin_lock_irqsave(lock, flags)    \
> > -     ({                                        \
> > -             int __ret;                        \
> > -             local_irq_save(flags);            \
> > -             __ret = raw_res_spin_lock(lock);  \
> > -             if (__ret)                        \
> > -                     local_irq_restore(flags); \
> > -             __ret;                            \
> > -     })
> > +static __always_inline __must_check int
> > +__raw_res_spin_lock_irqsave(rqspinlock_t *lock, unsigned long *flags)
> > +{
> > +     unsigned long __flags;
> > +     int ret;
> > +
> > +     local_irq_save(__flags);
> > +     ret = raw_res_spin_lock(lock);
> > +     if (ret)
> > +             local_irq_restore(__flags);
> > +
> > +     *flags = __flags;
> > +     return ret;
> > +}
> > +
> > +#define raw_res_spin_lock_irqsave(lock, flags) __raw_res_spin_lock_irqsave(lock, &flags)
>
> The __must_check annotations will cause compiler warnings in
> kernel/locking/locktorture.c where torture_raw_res_spin_write_lock()
> and torture_raw_res_spin_write_lock_irq() ignore lock acquisition
> failures.

This should be fine. I compiled with both gcc and clang, and neither
propagates the __must_check warning more than one level up the call chain.
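
A toy illustration of that behavior (hypothetical names, compiled outside
the kernel): gcc and clang warn only at direct call sites of a
warn_unused_result function; the attribute does not flow through a wrapper
that merely forwards the return value, unless the wrapper is annotated too.

	__attribute__((warn_unused_result)) int must_check_lock(void)
	{
		return -1;
	}

	int wrapper(void)			/* not annotated itself */
	{
		return must_check_lock();	/* ok: result is used (returned) */
	}

	void caller(void)
	{
		must_check_lock();		/* warning: result ignored */
		wrapper();			/* no warning: check not propagated */
	}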

>
> When raw_res_spin_lock() returns an error (deadlock or timeout), the
> torture test functions still return 0 to indicate success. The torture
> framework then calls the unlock functions, but the lock was never
> acquired. In res_spin_unlock(), this causes an out-of-bounds array
> access when computing locks[cnt - 1] with cnt=0, potentially corrupting
> per-CPU memory.
>
> Should the torture test handle lock acquisition failures, or use
> different lock types that cannot fail?
>

Deadlock should not happen in this case, but for correctness I can send
another patch to address it if people find that necessary.

Perhaps something like this:

@@ -931,7 +931,11 @@ static int lock_torture_writer(void *arg)
                if (!skip_main_lock) {
                        if (acq_writer_lim > 0)
                                j = jiffies;
-                       cxt.cur_ops->writelock(tid);
+                       err = cxt.cur_ops->writelock(tid);
+                       if (WARN_ON_ONCE(err)) {
+                               lwsp->n_lock_fail++;
+                               goto nested_unlock;
+                       }
                        if (WARN_ON_ONCE(lock_is_write_held))
                                lwsp->n_lock_fail++;
                        lock_is_write_held = true;
@@ -951,6 +955,7 @@ static int lock_torture_writer(void *arg)
                        WRITE_ONCE(last_lock_release, jiffies);
                        cxt.cur_ops->writeunlock(tid);
                }
+nested_unlock:
                if (cxt.cur_ops->nested_unlock)
                        cxt.cur_ops->nested_unlock(tid, lockset_mask);

>
> ---
> AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
> See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
>
> CI run summary: https://github.com/kernel-patches/bpf/actions/runs/19583558278
