* [PATCH v2 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map
@ 2024-05-14 12:23 Siddharth Chintamaneni
2024-05-14 12:23 ` [PATCH v2 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps Siddharth Chintamaneni
2024-05-14 12:29 ` [PATCH v2 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map Siddharth Chintamaneni
0 siblings, 2 replies; 3+ messages in thread
From: Siddharth Chintamaneni @ 2024-05-14 12:23 UTC (permalink / raw)
To: bpf
Cc: alexei.starovoitov, daniel, olsajiri, andrii, yonghong.song,
rjsu26, sairoop, miloc, memxor, Siddharth Chintamaneni
Added selftests to check for nested deadlocks in queue and stack maps.
test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__open 0 nsec
test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__load 0 nsec
test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__attach 0 nsec
test_map_queue_stack_nesting_success:PASS:MAP Write 0 nsec
test_map_queue_stack_nesting_success:PASS:no map nesting 0 nsec
test_map_queue_stack_nesting_success:PASS:no map nesting 0 nsec
384/1 test_queue_stack_nested_map/map_queue_nesting:OK
test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__open 0 nsec
test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__load 0 nsec
test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__attach 0 nsec
test_map_queue_stack_nesting_success:PASS:MAP Write 0 nsec
test_map_queue_stack_nesting_success:PASS:no map nesting 0 nsec
384/2 test_queue_stack_nested_map/map_stack_nesting:OK
384 test_queue_stack_nested_map:OK
Summary: 1/2 PASSED, 0 SKIPPED, 0 FAILED
Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
---
samples/bpf/Makefile | 3 +
.../prog_tests/test_queue_stack_nested_map.c | 69 +++++++++++
.../bpf/progs/test_queue_stack_nested_map.c | 116 ++++++++++++++++++
3 files changed, 188 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/test_queue_stack_nested_map.c
create mode 100644 tools/testing/selftests/bpf/progs/test_queue_stack_nested_map.c
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 9aa027b144df..9e1abf0e21ad 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -7,6 +7,7 @@ pound := \#
# List of programs to build
tprogs-y := test_lru_dist
+tprogs-y += sid_queue_stack
tprogs-y += sock_example
tprogs-y += fds_example
tprogs-y += sockex1
@@ -98,6 +99,7 @@ ibumad-objs := ibumad_user.o
hbm-objs := hbm.o $(CGROUP_HELPERS)
xdp_router_ipv4-objs := xdp_router_ipv4_user.o $(XDP_SAMPLE)
+sid_queue_stack-objs := sid_queue_stack_user.o
# Tell kbuild to always build the programs
always-y := $(tprogs-y)
@@ -149,6 +151,7 @@ always-y += task_fd_query_kern.o
always-y += ibumad_kern.o
always-y += hbm_out_kern.o
always-y += hbm_edt_kern.o
+always-y += sid_queue_stack_kern.o
TPROGS_CFLAGS = $(TPROGS_USER_CFLAGS)
TPROGS_LDFLAGS = $(TPROGS_USER_LDFLAGS)
diff --git a/tools/testing/selftests/bpf/prog_tests/test_queue_stack_nested_map.c b/tools/testing/selftests/bpf/prog_tests/test_queue_stack_nested_map.c
new file mode 100644
index 000000000000..fc46561788af
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_queue_stack_nested_map.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "test_queue_stack_nested_map.skel.h"
+
+
+static void test_map_queue_stack_nesting_success(bool is_map_queue)
+{
+ struct test_queue_stack_nested_map *skel;
+ int err;
+
+ skel = test_queue_stack_nested_map__open();
+ if (!ASSERT_OK_PTR(skel, "test_queue_stack_nested_map__open"))
+ return;
+
+ err = test_queue_stack_nested_map__load(skel);
+ if (!ASSERT_OK(err, "test_queue_stack_nested_map__load"))
+ goto out;
+
+ skel->bss->pid = getpid();
+ err = test_queue_stack_nested_map__attach(skel);
+ if (!ASSERT_OK(err, "test_queue_stack_nested_map__attach"))
+ goto out;
+
+ /* trigger map from userspace to check nesting */
+ int value = 0;
+
+ do {
+ if (is_map_queue) {
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_queue),
+ NULL, &value, 0);
+ if (err < 0)
+ break;
+ err = bpf_map_lookup_and_delete_elem(bpf_map__fd(skel->maps.map_queue),
+ NULL, &value);
+ } else {
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_stack),
+ NULL, &value, 0);
+ if (err < 0)
+ break;
+ err = bpf_map_lookup_and_delete_elem(bpf_map__fd(skel->maps.map_stack),
+ NULL, &value);
+ }
+ } while (0);
+
+
+ if (!ASSERT_OK(err, "MAP Write"))
+ goto out;
+
+ if (is_map_queue) {
+ ASSERT_EQ(skel->bss->err_queue_push, -EBUSY, "no map nesting");
+ ASSERT_EQ(skel->bss->err_queue_pop, -EBUSY, "no map nesting");
+ } else {
+ ASSERT_EQ(skel->bss->err_stack, -EBUSY, "no map nesting");
+ }
+out:
+ test_queue_stack_nested_map__destroy(skel);
+}
+
+void test_test_queue_stack_nested_map(void)
+{
+ if (test__start_subtest("map_queue_nesting"))
+ test_map_queue_stack_nesting_success(true);
+ if (test__start_subtest("map_stack_nesting"))
+ test_map_queue_stack_nesting_success(false);
+
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_queue_stack_nested_map.c b/tools/testing/selftests/bpf/progs/test_queue_stack_nested_map.c
new file mode 100644
index 000000000000..893a37593206
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_queue_stack_nested_map.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK);
+ __uint(max_entries, 32);
+ __uint(key_size, 0);
+ __uint(value_size, sizeof(__u32));
+} map_stack SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_QUEUE);
+ __uint(max_entries, 32);
+ __uint(key_size, 0);
+ __uint(value_size, sizeof(__u32));
+} map_queue SEC(".maps");
+
+
+int err_queue_push;
+int err_queue_pop;
+int err_stack;
+int pid;
+__u32 trigger_flag_queue_push;
+__u32 trigger_flag_queue_pop;
+__u32 trigger_flag_stack;
+
+SEC("fentry/queue_stack_map_push_elem")
+int BPF_PROG(test_queue_stack_push_trigger, raw_spinlock_t *lock, unsigned long flags)
+{
+
+ if ((bpf_get_current_pid_tgid() >> 32) != pid)
+ return 0;
+
+
+ trigger_flag_queue_push = 1;
+
+ return 0;
+}
+
+SEC("fentry/queue_map_pop_elem")
+int BPF_PROG(test_queue_pop_trigger, raw_spinlock_t *lock, unsigned long flags)
+{
+
+ if ((bpf_get_current_pid_tgid() >> 32) != pid)
+ return 0;
+
+ trigger_flag_queue_pop = 1;
+
+ return 0;
+}
+
+
+SEC("fentry/stack_map_pop_elem")
+int BPF_PROG(test_stack_pop_trigger, raw_spinlock_t *lock, unsigned long flags)
+{
+
+ if ((bpf_get_current_pid_tgid() >> 32) != pid)
+ return 0;
+
+ trigger_flag_stack = 1;
+
+ return 0;
+}
+
+SEC("fentry/_raw_spin_unlock_irqrestore")
+int BPF_PROG(test_queue_pop_nesting, raw_spinlock_t *lock, unsigned long flags)
+{
+ __u32 val;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != pid || trigger_flag_queue_pop != 1)
+ return 0;
+
+
+ err_queue_pop = bpf_map_pop_elem(&map_queue, &val);
+
+ trigger_flag_queue_pop = 0;
+
+ return 0;
+}
+
+SEC("fentry/_raw_spin_unlock_irqrestore")
+int BPF_PROG(test_stack_nesting, raw_spinlock_t *lock, unsigned long flags)
+{
+ __u32 val;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != pid || trigger_flag_stack != 1)
+ return 0;
+
+
+ err_stack = bpf_map_pop_elem(&map_stack, &val);
+
+ trigger_flag_stack = 0;
+
+ return 0;
+}
+
+
+SEC("fentry/_raw_spin_unlock_irqrestore")
+int BPF_PROG(test_queue_push_nesting, raw_spinlock_t *lock, unsigned long flags)
+{
+ __u32 val = 1;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != pid || trigger_flag_queue_push != 1) {
+ return 0;
+ }
+
+ err_queue_push = bpf_map_push_elem(&map_queue, &val, 0);
+
+ trigger_flag_queue_push = 0;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
--
2.44.0
^ permalink raw reply related [flat|nested] 3+ messages in thread
* [PATCH v2 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps
2024-05-14 12:23 [PATCH v2 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map Siddharth Chintamaneni
@ 2024-05-14 12:23 ` Siddharth Chintamaneni
2024-05-14 12:29 ` [PATCH v2 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map Siddharth Chintamaneni
1 sibling, 0 replies; 3+ messages in thread
From: Siddharth Chintamaneni @ 2024-05-14 12:23 UTC (permalink / raw)
To: bpf
Cc: alexei.starovoitov, daniel, olsajiri, andrii, yonghong.song,
rjsu26, sairoop, miloc, memxor, Siddharth Chintamaneni,
syzbot+8bdfc2c53fb2b63e1871
This patch is a revised version which addresses a possible deadlock issue in
queue and stack map types.
Deadlock could happen when a nested BPF program acquires the same lock
as the parent BPF program to perform a write operation on the same map
as the first one. This bug is also reported by syzbot.
Link: https://lore.kernel.org/lkml/0000000000004c3fc90615f37756@google.com/
Reported-by: syzbot+8bdfc2c53fb2b63e1871@syzkaller.appspotmail.com
Fixes: f1a2e44a3aec ("bpf: add queue and stack maps")
Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
---
kernel/bpf/queue_stack_maps.c | 76 +++++++++++++++++++++++++++++++++--
1 file changed, 73 insertions(+), 3 deletions(-)
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index d869f51ea93a..b5ed76c9ddd7 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -13,11 +13,13 @@
#define QUEUE_STACK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
+
struct bpf_queue_stack {
struct bpf_map map;
raw_spinlock_t lock;
u32 head, tail;
u32 size; /* max_entries + 1 */
+ int __percpu *map_locked;
char elements[] __aligned(8);
};
@@ -78,6 +80,15 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
qs->size = size;
+ qs->map_locked = bpf_map_alloc_percpu(&qs->map,
+ sizeof(int),
+ sizeof(int),
+ GFP_USER | __GFP_NOWARN);
+ if (!qs->map_locked) {
+ bpf_map_area_free(qs);
+ return ERR_PTR(-ENOMEM);
+ }
+
raw_spin_lock_init(&qs->lock);
return &qs->map;
@@ -88,19 +99,57 @@ static void queue_stack_map_free(struct bpf_map *map)
{
struct bpf_queue_stack *qs = bpf_queue_stack(map);
+ free_percpu(qs->map_locked);
bpf_map_area_free(qs);
}
+static inline int map_lock_inc(struct bpf_queue_stack *qs)
+{
+ unsigned long flags;
+
+ preempt_disable();
+ local_irq_save(flags);
+ if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
+ __this_cpu_dec(*(qs->map_locked));
+ local_irq_restore(flags);
+ preempt_enable();
+ return -EBUSY;
+ }
+
+ local_irq_restore(flags);
+ preempt_enable();
+
+ return 0;
+}
+
+static inline void map_unlock_dec(struct bpf_queue_stack *qs)
+{
+ unsigned long flags;
+
+ preempt_disable();
+ local_irq_save(flags);
+ __this_cpu_dec(*(qs->map_locked));
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
struct bpf_queue_stack *qs = bpf_queue_stack(map);
unsigned long flags;
int err = 0;
void *ptr;
+ int ret;
+
+ ret = map_lock_inc(qs);
+ if (ret)
+ return ret;
if (in_nmi()) {
- if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+ if (!raw_spin_trylock_irqsave(&qs->lock, flags)) {
+ map_unlock_dec(qs);
return -EBUSY;
+ }
} else {
raw_spin_lock_irqsave(&qs->lock, flags);
}
@@ -121,6 +170,8 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
out:
raw_spin_unlock_irqrestore(&qs->lock, flags);
+ map_unlock_dec(qs);
+
return err;
}
@@ -132,10 +183,17 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
int err = 0;
void *ptr;
u32 index;
+ int ret;
+
+ ret = map_lock_inc(qs);
+ if (ret)
+ return ret;
if (in_nmi()) {
- if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+ if (!raw_spin_trylock_irqsave(&qs->lock, flags)) {
+ map_unlock_dec(qs);
return -EBUSY;
+ }
} else {
raw_spin_lock_irqsave(&qs->lock, flags);
}
@@ -158,6 +216,8 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
out:
raw_spin_unlock_irqrestore(&qs->lock, flags);
+ map_unlock_dec(qs);
+
return err;
}
@@ -193,6 +253,7 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
unsigned long irq_flags;
int err = 0;
void *dst;
+ int ret;
/* BPF_EXIST is used to force making room for a new element in case the
* map is full
@@ -203,9 +264,16 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
if (flags & BPF_NOEXIST || flags > BPF_EXIST)
return -EINVAL;
+
+ ret = map_lock_inc(qs);
+ if (ret)
+ return ret;
+
if (in_nmi()) {
- if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
+ if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags)) {
+ map_unlock_dec(qs);
return -EBUSY;
+ }
} else {
raw_spin_lock_irqsave(&qs->lock, irq_flags);
}
@@ -228,6 +296,8 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
out:
raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
+ map_unlock_dec(qs);
+
return err;
}
--
2.44.0
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH v2 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map
2024-05-14 12:23 [PATCH v2 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map Siddharth Chintamaneni
2024-05-14 12:23 ` [PATCH v2 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps Siddharth Chintamaneni
@ 2024-05-14 12:29 ` Siddharth Chintamaneni
1 sibling, 0 replies; 3+ messages in thread
From: Siddharth Chintamaneni @ 2024-05-14 12:29 UTC (permalink / raw)
To: bpf
On Tue, 14 May 2024 at 08:23, Siddharth Chintamaneni
<sidchintamaneni@gmail.com> wrote:
>
> Added selftests to check for nested deadlocks in queue and stack maps.
>
> test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__open 0 nsec
> test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__load 0 nsec
> test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__attach 0 nsec
> test_map_queue_stack_nesting_success:PASS:MAP Write 0 nsec
> test_map_queue_stack_nesting_success:PASS:no map nesting 0 nsec
> test_map_queue_stack_nesting_success:PASS:no map nesting 0 nsec
> 384/1 test_queue_stack_nested_map/map_queue_nesting:OK
> test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__open 0 nsec
> test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__load 0 nsec
> test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__attach 0 nsec
> test_map_queue_stack_nesting_success:PASS:MAP Write 0 nsec
> test_map_queue_stack_nesting_success:PASS:no map nesting 0 nsec
> 384/2 test_queue_stack_nested_map/map_stack_nesting:OK
> 384 test_queue_stack_nested_map:OK
> Summary: 1/2 PASSED, 0 SKIPPED, 0 FAILED
>
> Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
> ---
> samples/bpf/Makefile | 3 +
Somehow my test environment file sneaked in. I will remove this
file and send a revised version of the selftests.
> .../prog_tests/test_queue_stack_nested_map.c | 69 +++++++++++
> .../bpf/progs/test_queue_stack_nested_map.c | 116 ++++++++++++++++++
> 3 files changed, 188 insertions(+)
> create mode 100644 tools/testing/selftests/bpf/prog_tests/test_queue_stack_nested_map.c
> create mode 100644 tools/testing/selftests/bpf/progs/test_queue_stack_nested_map.c
>
> diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
> index 9aa027b144df..9e1abf0e21ad 100644
> --- a/samples/bpf/Makefile
> +++ b/samples/bpf/Makefile
> @@ -7,6 +7,7 @@ pound := \#
>
> # List of programs to build
> tprogs-y := test_lru_dist
> +tprogs-y += sid_queue_stack
> tprogs-y += sock_example
> tprogs-y += fds_example
> tprogs-y += sockex1
> @@ -98,6 +99,7 @@ ibumad-objs := ibumad_user.o
> hbm-objs := hbm.o $(CGROUP_HELPERS)
>
> xdp_router_ipv4-objs := xdp_router_ipv4_user.o $(XDP_SAMPLE)
> +sid_queue_stack-objs := sid_queue_stack_user.o
>
> # Tell kbuild to always build the programs
> always-y := $(tprogs-y)
> @@ -149,6 +151,7 @@ always-y += task_fd_query_kern.o
> always-y += ibumad_kern.o
> always-y += hbm_out_kern.o
> always-y += hbm_edt_kern.o
> +always-y += sid_queue_stack_kern.o
>
> TPROGS_CFLAGS = $(TPROGS_USER_CFLAGS)
> TPROGS_LDFLAGS = $(TPROGS_USER_LDFLAGS)
> diff --git a/tools/testing/selftests/bpf/prog_tests/test_queue_stack_nested_map.c b/tools/testing/selftests/bpf/prog_tests/test_queue_stack_nested_map.c
> new file mode 100644
> index 000000000000..fc46561788af
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/prog_tests/test_queue_stack_nested_map.c
> @@ -0,0 +1,69 @@
> +// SPDX-License-Identifier: GPL-2.0
> +#include <test_progs.h>
> +#include <network_helpers.h>
> +
> +#include "test_queue_stack_nested_map.skel.h"
> +
> +
> +static void test_map_queue_stack_nesting_success(bool is_map_queue)
> +{
> + struct test_queue_stack_nested_map *skel;
> + int err;
> +
> + skel = test_queue_stack_nested_map__open();
> + if (!ASSERT_OK_PTR(skel, "test_queue_stack_nested_map__open"))
> + return;
> +
> + err = test_queue_stack_nested_map__load(skel);
> + if (!ASSERT_OK(err, "test_queue_stack_nested_map__load"))
> + goto out;
> +
> + skel->bss->pid = getpid();
> + err = test_queue_stack_nested_map__attach(skel);
> + if (!ASSERT_OK(err, "test_queue_stack_nested_map__attach"))
> + goto out;
> +
> + /* trigger map from userspace to check nesting */
> + int value = 0;
> +
> + do {
> + if (is_map_queue) {
> + err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_queue),
> + NULL, &value, 0);
> + if (err < 0)
> + break;
> + err = bpf_map_lookup_and_delete_elem(bpf_map__fd(skel->maps.map_queue),
> + NULL, &value);
> + } else {
> + err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_stack),
> + NULL, &value, 0);
> + if (err < 0)
> + break;
> + err = bpf_map_lookup_and_delete_elem(bpf_map__fd(skel->maps.map_stack),
> + NULL, &value);
> + }
> + } while (0);
> +
> +
> + if (!ASSERT_OK(err, "MAP Write"))
> + goto out;
> +
> + if (is_map_queue) {
> + ASSERT_EQ(skel->bss->err_queue_push, -EBUSY, "no map nesting");
> + ASSERT_EQ(skel->bss->err_queue_pop, -EBUSY, "no map nesting");
> + } else {
> + ASSERT_EQ(skel->bss->err_stack, -EBUSY, "no map nesting");
> + }
> +out:
> + test_queue_stack_nested_map__destroy(skel);
> +}
> +
> +void test_test_queue_stack_nested_map(void)
> +{
> + if (test__start_subtest("map_queue_nesting"))
> + test_map_queue_stack_nesting_success(true);
> + if (test__start_subtest("map_stack_nesting"))
> + test_map_queue_stack_nesting_success(false);
> +
> +}
> +
> diff --git a/tools/testing/selftests/bpf/progs/test_queue_stack_nested_map.c b/tools/testing/selftests/bpf/progs/test_queue_stack_nested_map.c
> new file mode 100644
> index 000000000000..893a37593206
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/test_queue_stack_nested_map.c
> @@ -0,0 +1,116 @@
> +// SPDX-License-Identifier: GPL-2.0
> +#include "vmlinux.h"
> +#include <bpf/bpf_helpers.h>
> +#include <bpf/bpf_tracing.h>
> +
> +struct {
> + __uint(type, BPF_MAP_TYPE_STACK);
> + __uint(max_entries, 32);
> + __uint(key_size, 0);
> + __uint(value_size, sizeof(__u32));
> +} map_stack SEC(".maps");
> +
> +struct {
> + __uint(type, BPF_MAP_TYPE_QUEUE);
> + __uint(max_entries, 32);
> + __uint(key_size, 0);
> + __uint(value_size, sizeof(__u32));
> +} map_queue SEC(".maps");
> +
> +
> +int err_queue_push;
> +int err_queue_pop;
> +int err_stack;
> +int pid;
> +__u32 trigger_flag_queue_push;
> +__u32 trigger_flag_queue_pop;
> +__u32 trigger_flag_stack;
> +
> +SEC("fentry/queue_stack_map_push_elem")
> +int BPF_PROG(test_queue_stack_push_trigger, raw_spinlock_t *lock, unsigned long flags)
> +{
> +
> + if ((bpf_get_current_pid_tgid() >> 32) != pid)
> + return 0;
> +
> +
> + trigger_flag_queue_push = 1;
> +
> + return 0;
> +}
> +
> +SEC("fentry/queue_map_pop_elem")
> +int BPF_PROG(test_queue_pop_trigger, raw_spinlock_t *lock, unsigned long flags)
> +{
> +
> + if ((bpf_get_current_pid_tgid() >> 32) != pid)
> + return 0;
> +
> + trigger_flag_queue_pop = 1;
> +
> + return 0;
> +}
> +
> +
> +SEC("fentry/stack_map_pop_elem")
> +int BPF_PROG(test_stack_pop_trigger, raw_spinlock_t *lock, unsigned long flags)
> +{
> +
> + if ((bpf_get_current_pid_tgid() >> 32) != pid)
> + return 0;
> +
> + trigger_flag_stack = 1;
> +
> + return 0;
> +}
> +
> +SEC("fentry/_raw_spin_unlock_irqrestore")
> +int BPF_PROG(test_queue_pop_nesting, raw_spinlock_t *lock, unsigned long flags)
> +{
> + __u32 val;
> +
> + if ((bpf_get_current_pid_tgid() >> 32) != pid || trigger_flag_queue_pop != 1)
> + return 0;
> +
> +
> + err_queue_pop = bpf_map_pop_elem(&map_queue, &val);
> +
> + trigger_flag_queue_pop = 0;
> +
> + return 0;
> +}
> +
> +SEC("fentry/_raw_spin_unlock_irqrestore")
> +int BPF_PROG(test_stack_nesting, raw_spinlock_t *lock, unsigned long flags)
> +{
> + __u32 val;
> +
> + if ((bpf_get_current_pid_tgid() >> 32) != pid || trigger_flag_stack != 1)
> + return 0;
> +
> +
> + err_stack = bpf_map_pop_elem(&map_stack, &val);
> +
> + trigger_flag_stack = 0;
> +
> + return 0;
> +}
> +
> +
> +SEC("fentry/_raw_spin_unlock_irqrestore")
> +int BPF_PROG(test_queue_push_nesting, raw_spinlock_t *lock, unsigned long flags)
> +{
> + __u32 val = 1;
> +
> + if ((bpf_get_current_pid_tgid() >> 32) != pid || trigger_flag_queue_push != 1) {
> + return 0;
> + }
> +
> + err_queue_push = bpf_map_push_elem(&map_queue, &val, 0);
> +
> + trigger_flag_queue_push = 0;
> +
> + return 0;
> +}
> +
> +char _license[] SEC("license") = "GPL";
> --
> 2.44.0
>
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2024-05-14 12:29 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-05-14 12:23 [PATCH v2 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map Siddharth Chintamaneni
2024-05-14 12:23 ` [PATCH v2 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps Siddharth Chintamaneni
2024-05-14 12:29 ` [PATCH v2 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map Siddharth Chintamaneni
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox