* [PATCH bpf-next v1] selftests/bpf: Add ABBCCA case for rqspinlock stress test
@ 2025-10-22 17:54 Kumar Kartikeya Dwivedi
2025-10-22 22:04 ` Eduard Zingerman
0 siblings, 1 reply; 4+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2025-10-22 17:54 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann,
Martin KaFai Lau, Eduard Zingerman, kkd, kernel-team
Introduce a new mode for the rqspinlock stress test that exercises a
deadlock that won't be detected by the AA and ABBA checks, such that we
always reliably trigger the timeout fallback. We need 4 CPUs for this
particular case, as CPU 0 is untouched, and three participant CPUs for
triggering the ABBCCA case.
Refactor the lock acquisition paths in the module to better reflect the
three modes and choose the right lock depending on the context.
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
.../selftests/bpf/prog_tests/res_spin_lock.c | 11 ++-
.../bpf/test_kmods/bpf_test_rqspinlock.c | 85 ++++++++++++++-----
2 files changed, 72 insertions(+), 24 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
index 8c6c2043a432..f566d89f85ea 100644
--- a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
@@ -111,7 +111,16 @@ void serial_test_res_spin_lock_stress(void)
sleep(5);
unload_module("bpf_test_rqspinlock", false);
- ASSERT_OK(load_module_params("bpf_test_rqspinlock.ko", "test_ab=1", false), "load module ABBA");
+ ASSERT_OK(load_module_params("bpf_test_rqspinlock.ko", "test_mode=1", false), "load module ABBA");
+ sleep(5);
+ unload_module("bpf_test_rqspinlock", false);
+
+ if (libbpf_num_possible_cpus() < 4) {
+ test__skip();
+ return;
+ }
+
+ ASSERT_OK(load_module_params("bpf_test_rqspinlock.ko", "test_mode=2", false), "load module ABBCCA");
sleep(5);
unload_module("bpf_test_rqspinlock", false);
}
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
index 769206fc70e4..4cced4bb8af1 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
@@ -22,23 +22,61 @@ static struct perf_event_attr hw_attr = {
static rqspinlock_t lock_a;
static rqspinlock_t lock_b;
+static rqspinlock_t lock_c;
+
+enum rqsl_mode {
+ RQSL_MODE_AA = 0,
+ RQSL_MODE_ABBA,
+ RQSL_MODE_ABBCCA,
+};
+
+static int test_mode = RQSL_MODE_AA;
+module_param(test_mode, int, 0644);
+MODULE_PARM_DESC(test_mode,
+ "rqspinlock test mode: 0 = AA, 1 = ABBA, 2 = ABBCCA");
static struct perf_event **rqsl_evts;
static int rqsl_nevts;
-static bool test_ab = false;
-module_param(test_ab, bool, 0644);
-MODULE_PARM_DESC(test_ab, "Test ABBA situations instead of AA situations");
-
static struct task_struct **rqsl_threads;
static int rqsl_nthreads;
static atomic_t rqsl_ready_cpus = ATOMIC_INIT(0);
static int pause = 0;
-static bool nmi_locks_a(int cpu)
+static const char *rqsl_mode_names[] = {
+ [RQSL_MODE_AA] = "AA",
+ [RQSL_MODE_ABBA] = "ABBA",
+ [RQSL_MODE_ABBCCA] = "ABBCCA",
+};
+
+struct rqsl_lock_pair {
+ rqspinlock_t *worker_lock;
+ rqspinlock_t *nmi_lock;
+};
+
+static struct rqsl_lock_pair rqsl_get_lock_pair(int cpu)
{
- return (cpu & 1) && test_ab;
+ int mode = READ_ONCE(test_mode);
+
+ switch (mode) {
+ default:
+ case RQSL_MODE_AA:
+ return (struct rqsl_lock_pair){ &lock_a, &lock_a };
+ case RQSL_MODE_ABBA:
+ if (cpu & 1)
+ return (struct rqsl_lock_pair){ &lock_b, &lock_a };
+ return (struct rqsl_lock_pair){ &lock_a, &lock_b };
+ case RQSL_MODE_ABBCCA:
+ switch (cpu % 3) {
+ case 0:
+ return (struct rqsl_lock_pair){ &lock_a, &lock_b };
+ case 1:
+ return (struct rqsl_lock_pair){ &lock_b, &lock_c };
+ default:
+ return (struct rqsl_lock_pair){ &lock_c, &lock_a };
+ }
+ }
}
static int rqspinlock_worker_fn(void *arg)
@@ -51,19 +89,17 @@ static int rqspinlock_worker_fn(void *arg)
atomic_inc(&rqsl_ready_cpus);
while (!kthread_should_stop()) {
+ struct rqsl_lock_pair locks = rqsl_get_lock_pair(cpu);
+ rqspinlock_t *worker_lock = locks.worker_lock;
+
if (READ_ONCE(pause)) {
msleep(1000);
continue;
}
- if (nmi_locks_a(cpu))
- ret = raw_res_spin_lock_irqsave(&lock_b, flags);
- else
- ret = raw_res_spin_lock_irqsave(&lock_a, flags);
+ ret = raw_res_spin_lock_irqsave(worker_lock, flags);
mdelay(20);
- if (nmi_locks_a(cpu) && !ret)
- raw_res_spin_unlock_irqrestore(&lock_b, flags);
- else if (!ret)
- raw_res_spin_unlock_irqrestore(&lock_a, flags);
+ if (!ret)
+ raw_res_spin_unlock_irqrestore(worker_lock, flags);
cpu_relax();
}
return 0;
@@ -91,6 +127,7 @@ static int rqspinlock_worker_fn(void *arg)
static void nmi_cb(struct perf_event *event, struct perf_sample_data *data,
struct pt_regs *regs)
{
+ struct rqsl_lock_pair locks;
int cpu = smp_processor_id();
unsigned long flags;
int ret;
@@ -98,17 +135,13 @@ static void nmi_cb(struct perf_event *event, struct perf_sample_data *data,
if (!cpu || READ_ONCE(pause))
return;
- if (nmi_locks_a(cpu))
- ret = raw_res_spin_lock_irqsave(&lock_a, flags);
- else
- ret = raw_res_spin_lock_irqsave(test_ab ? &lock_b : &lock_a, flags);
+ locks = rqsl_get_lock_pair(cpu);
+ ret = raw_res_spin_lock_irqsave(locks.nmi_lock, flags);
mdelay(10);
- if (nmi_locks_a(cpu) && !ret)
- raw_res_spin_unlock_irqrestore(&lock_a, flags);
- else if (!ret)
- raw_res_spin_unlock_irqrestore(test_ab ? &lock_b : &lock_a, flags);
+ if (!ret)
+ raw_res_spin_unlock_irqrestore(locks.nmi_lock, flags);
}
static void free_rqsl_threads(void)
@@ -142,13 +175,19 @@ static int bpf_test_rqspinlock_init(void)
int i, ret;
int ncpus = num_online_cpus();
- pr_err("Mode = %s\n", test_ab ? "ABBA" : "AA");
+ if (test_mode < RQSL_MODE_AA || test_mode > RQSL_MODE_ABBCCA) {
+ pr_err("Invalid mode %d\n", test_mode);
+ return -EINVAL;
+ }
+
+ pr_err("Mode = %s\n", rqsl_mode_names[test_mode]);
if (ncpus < 3)
return -ENOTSUPP;
raw_res_spin_lock_init(&lock_a);
raw_res_spin_lock_init(&lock_b);
+ raw_res_spin_lock_init(&lock_c);
rqsl_evts = kcalloc(ncpus - 1, sizeof(*rqsl_evts), GFP_KERNEL);
if (!rqsl_evts)
--
2.51.0
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH bpf-next v1] selftests/bpf: Add ABBCCA case for rqspinlock stress test
2025-10-22 17:54 [PATCH bpf-next v1] selftests/bpf: Add ABBCCA case for rqspinlock stress test Kumar Kartikeya Dwivedi
@ 2025-10-22 22:04 ` Eduard Zingerman
2025-10-24 16:42 ` Amery Hung
0 siblings, 1 reply; 4+ messages in thread
From: Eduard Zingerman @ 2025-10-22 22:04 UTC (permalink / raw)
To: Kumar Kartikeya Dwivedi, bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann,
Martin KaFai Lau, kkd, kernel-team
On Wed, 2025-10-22 at 17:54 +0000, Kumar Kartikeya Dwivedi wrote:
> Introduce a new mode for the rqspinlock stress test that exercises a
> deadlock that won't be detected by the AA and ABBA checks, such that we
> always reliably trigger the timeout fallback. We need 4 CPUs for this
> particular case, as CPU 0 is untouched, and three participant CPUs for
> triggering the ABBCCA case.
>
> Refactor the lock acquisition paths in the module to better reflect the
> three modes and choose the right lock depending on the context.
>
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> ---
The overhaul makes sense to me and the code is easy to follow.
The only nit I have is that test does not fail if deadlock is not detected.
E.g. if I remove raw_res_spin_unlock_irqrestore() call in nmi_cb(),
there are stall splats in dmesg, but test harness reports success.
I suggest adding some signal that all kthreads terminated successfully.
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
[...]
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH bpf-next v1] selftests/bpf: Add ABBCCA case for rqspinlock stress test
2025-10-22 22:04 ` Eduard Zingerman
@ 2025-10-24 16:42 ` Amery Hung
2025-10-24 16:49 ` Eduard Zingerman
0 siblings, 1 reply; 4+ messages in thread
From: Amery Hung @ 2025-10-24 16:42 UTC (permalink / raw)
To: Eduard Zingerman
Cc: Kumar Kartikeya Dwivedi, bpf, Alexei Starovoitov, Andrii Nakryiko,
Daniel Borkmann, Martin KaFai Lau, kkd, kernel-team
On Wed, Oct 22, 2025 at 3:04 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Wed, 2025-10-22 at 17:54 +0000, Kumar Kartikeya Dwivedi wrote:
> > Introduce a new mode for the rqspinlock stress test that exercises a
> > deadlock that won't be detected by the AA and ABBA checks, such that we
> > always reliably trigger the timeout fallback. We need 4 CPUs for this
> > particular case, as CPU 0 is untouched, and three participant CPUs for
> > triggering the ABBCCA case.
> >
> > Refactor the lock acquisition paths in the module to better reflect the
> > three modes and choose the right lock depending on the context.
> >
> > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > ---
>
> The overhaul makes sense to me and the code is easy to follow.
> The only nit I have is that test does not fail if deadlock is not detected.
> E.g. if I remove raw_res_spin_unlock_irqrestore() call in nmi_cb(),
> there are stall splats in dmesg, but test harness reports success.
> I suggest adding some signal that all kthreads terminated successfully.
>
> Acked-by: Eduard Zingerman <eddyz87@gmail.com>
>
Maybe it should be the other way around? The test must and should have
triggered deadlocks, so if we count how many times the return of
raw_res_spin_lock_irqsave == -EDEADLK or -ETIMEDOUT, the number
should be non-zero.
The test looks good to me otherwise.
Reviewed-by: Amery Hung <ameryhung@gmail.com>
> [...]
>
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH bpf-next v1] selftests/bpf: Add ABBCCA case for rqspinlock stress test
2025-10-24 16:42 ` Amery Hung
@ 2025-10-24 16:49 ` Eduard Zingerman
0 siblings, 0 replies; 4+ messages in thread
From: Eduard Zingerman @ 2025-10-24 16:49 UTC (permalink / raw)
To: Amery Hung
Cc: Kumar Kartikeya Dwivedi, bpf, Alexei Starovoitov, Andrii Nakryiko,
Daniel Borkmann, Martin KaFai Lau, kkd, kernel-team
On Fri, 2025-10-24 at 09:42 -0700, Amery Hung wrote:
> On Wed, Oct 22, 2025 at 3:04 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
> >
> > On Wed, 2025-10-22 at 17:54 +0000, Kumar Kartikeya Dwivedi wrote:
> > > Introduce a new mode for the rqspinlock stress test that exercises a
> > > deadlock that won't be detected by the AA and ABBA checks, such that we
> > > always reliably trigger the timeout fallback. We need 4 CPUs for this
> > > particular case, as CPU 0 is untouched, and three participant CPUs for
> > > triggering the ABBCCA case.
> > >
> > > Refactor the lock acquisition paths in the module to better reflect the
> > > three modes and choose the right lock depending on the context.
> > >
> > > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > > ---
> >
> > The overhaul makes sense to me and the code is easy to follow.
> > The only nit I have is that test does not fail if deadlock is not detected.
> > E.g. if I remove raw_res_spin_unlock_irqrestore() call in nmi_cb(),
> > there are stall splats in dmesg, but test harness reports success.
> > I suggest adding some signal that all kthreads terminated successfully.
> >
> > Acked-by: Eduard Zingerman <eddyz87@gmail.com>
> >
>
> Maybe it should be another way around? The test must and should have
> triggered deadlocks, so if we count how many times the return of
> raw_res_spin_lock_irqrestore == -EDEADLK or -EITMEDOUT, the number
> should be non-zero.
+1, that would be a good thing to check.
>
> The test looks good to me otherwise.
>
> Reviewed-by: Amery Hung <ameryhung@gmail.com>
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2025-10-24 16:49 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-10-22 17:54 [PATCH bpf-next v1] selftests/bpf: Add ABBCCA case for rqspinlock stress test Kumar Kartikeya Dwivedi
2025-10-22 22:04 ` Eduard Zingerman
2025-10-24 16:42 ` Amery Hung
2025-10-24 16:49 ` Eduard Zingerman
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox