netdev.vger.kernel.org archive mirror
* Possible deadlock in bpf queue map
@ 2023-09-07  5:02 Hsin-Wei Hung
  2023-09-07 10:26 ` Toke Høiland-Jørgensen
  0 siblings, 1 reply; 9+ messages in thread
From: Hsin-Wei Hung @ 2023-09-07  5:02 UTC (permalink / raw)
  To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, netdev, bpf

Hi,

Our bpf fuzzer, a customized Syzkaller, triggered a lockdep warning in
the bpf queue map in v5.15. Since queue_stack_maps.c has seen no major
changes since v5.15, we think the issue should still exist in the latest
kernel. The bug can only be triggered occasionally, and we suspect the
following eBPF program to be one of those involved. The lockdep warning
is attached at the end.

#define DEFINE_BPF_MAP_NO_KEY(the_map, TypeOfMap, MapFlags,            \
                              TypeOfValue, MaxEntries)                 \
        struct {                                                        \
            __uint(type, TypeOfMap);                                    \
            __uint(map_flags, (MapFlags));                              \
            __uint(max_entries, (MaxEntries));                          \
            __type(value, TypeOfValue);                                 \
        } the_map SEC(".maps");

DEFINE_BPF_MAP_NO_KEY(map_0, BPF_MAP_TYPE_QUEUE, 0 | BPF_F_WRONLY,
                      struct_0, 162);
SEC("perf_event")
int func(struct bpf_perf_event_data *ctx) {
        char v0[96] = {};
        uint64_t v1 = 0;
        v1 = bpf_map_pop_elem(&map_0, v0);
        return 163819661;
}


The program is attached to the following perf event.

struct perf_event_attr attr_type_hw = {
        .type = PERF_TYPE_HARDWARE,
        .config = PERF_COUNT_HW_CPU_CYCLES,
        .sample_freq = 50,
        .inherit = 1,
        .freq = 1,
};
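
A perf_event program like this is typically attached by opening the event
with perf_event_open() and then issuing PERF_EVENT_IOC_SET_BPF on the
resulting fd. Below is a minimal sketch, not our exact fuzzer code; the
helper name, the pid/cpu choice and the error handling are only
illustrative.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open the perf event described by *attr for the calling process on any
 * CPU, attach the already-loaded SEC("perf_event") program prog_fd to it
 * and enable the event. Returns the event fd, or -1 on error. */
static int attach_prog_to_event(struct perf_event_attr *attr, int prog_fd)
{
        /* perf_event_open() has no glibc wrapper */
        int pefd = syscall(__NR_perf_event_open, attr, 0 /* this task */,
                           -1 /* any cpu */, -1 /* no group */, 0);
        if (pefd < 0)
                return -1;

        if (ioctl(pefd, PERF_EVENT_IOC_SET_BPF, prog_fd) ||
            ioctl(pefd, PERF_EVENT_IOC_ENABLE, 0)) {
                close(pefd);
                return -1;
        }
        return pefd;
}

/* e.g.: attach_prog_to_event(&attr_type_hw, prog_fd); */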

================================WARNING: inconsistent lock state
5.15.26+ #2 Not tainted
--------------------------------
inconsistent {INITIAL USE} -> {IN-NMI} usage.
syz-executor.5/19749 [HC1[1]:SC0[0]:HE0:SE1] takes:
ffff88804c9fc198 (&qs->lock){..-.}-{2:2}, at: __queue_map_get+0x31/0x250
{INITIAL USE} state was registered at:
  lock_acquire+0x1a3/0x4b0
  _raw_spin_lock_irqsave+0x48/0x60
  __queue_map_get+0x31/0x250
  bpf_prog_577904e86c81dead_func+0x12/0x4b4
  trace_call_bpf+0x262/0x5d0
  perf_trace_run_bpf_submit+0x91/0x1c0
  perf_trace_sched_switch+0x46c/0x700
  __schedule+0x11b5/0x24a0
  schedule+0xd4/0x270
  futex_wait_queue_me+0x25f/0x520
  futex_wait+0x1e0/0x5f0
  do_futex+0x395/0x1890
  __x64_sys_futex+0x1cb/0x480
  do_syscall_64+0x3b/0xc0
  entry_SYSCALL_64_after_hwframe+0x44/0xae
irq event stamp: 13640
hardirqs last  enabled at (13639): [<ffffffff95eb2bf4>]
_raw_spin_unlock_irq+0x24/0x40
hardirqs last disabled at (13640): [<ffffffff95eb2d4d>]
_raw_spin_lock_irqsave+0x5d/0x60
softirqs last  enabled at (13464): [<ffffffff93e26de5>] __sys_bpf+0x3e15/0x4e80
softirqs last disabled at (13462): [<ffffffff93e26da3>] __sys_bpf+0x3dd3/0x4e80

other info that might help us debug this:
 Possible unsafe locking scenario:

       CPU0
       ----
  lock(&qs->lock);
  <Interrupt>
    lock(&qs->lock);

 *** DEADLOCK ***

3 locks held by syz-executor.5/19749:
 #0: ffff88801904dd80 (&sb->s_type->i_mutex_key#17){++++}-{3:3}, at:
path_openat+0x1585/0x2760
 #1: ffff88800924c020 (&chan->lock){-.-.}-{2:2}, at:
p9_virtio_request+0x18c/0x640
 #2: ffffffff977d32e0 (rcu_read_lock){....}-{1:2}, at:
is_bpf_text_address+0x5/0x180

stack backtrace:
CPU: 0 PID: 19749 Comm: syz-executor.5 Not tainted 5.15.26+ #2
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
1.13.0-1ubuntu1.1 04/01/2014
Call Trace:
 <NMI>
 dump_stack_lvl+0x9d/0xc9
 lock_acquire.cold+0x3b/0x40
 ? lock_release+0x6c0/0x6c0
 ? __queue_map_get+0x31/0x250
 ? __perf_event_header__init_id+0x2bd/0x610
 ? look_up_lock_class+0x76/0xd0
 ? lock_acquire+0x282/0x4b0
 _raw_spin_lock_irqsave+0x48/0x60
 ? __queue_map_get+0x31/0x250
 __queue_map_get+0x31/0x250
 bpf_prog_9e60c62721f22dfa_func+0x5a/0xc18
 bpf_overflow_handler+0x17b/0x460
 ? perf_output_read+0x12b0/0x12b0
 ? __perf_event_account_interrupt+0xe9/0x3a0
 __perf_event_overflow+0x13f/0x3c0
 handle_pmi_common+0x59b/0x980
 ? intel_pmu_save_and_restart+0x100/0x100
 ? intel_bts_interrupt+0x10d/0x3d0
 intel_pmu_handle_irq+0x28a/0x8b0
 perf_event_nmi_handler+0x4d/0x70
 nmi_handle+0x147/0x390
 default_do_nmi+0x40/0x100
 exc_nmi+0x152/0x170
 end_repeat_nmi+0x16/0x55
RIP: 0010:lock_acquire+0x1b4/0x4b0
Code: f7 6a 00 45 0f b6 c9 6a 00 ff b4 24 f8 00 00 00 ff 74 24 18 e8
cd 98 ff ff b8 ff ff ff ff 48 83 c4 20 65 0f c1 05 fc 19 50 6c <83> f8
01 0f 85 7f 02 00 00 48 83 7c 24 08 00 0f 85 50 02 00 00 48
RSP: 0018:ffff88802825ec30 EFLAGS: 00000057
RAX: 0000000000000001 RBX: 1ffff1100504bd88 RCX: 00000000dea5863f
RDX: 1ffff110053cd1b6 RSI: 1ffff1100504bd6f RDI: 00000000fe94d1a3
RBP: 0000000000000000 R08: 0000000000000000 R09: ffffffff99172ba7
R10: fffffbfff322e574 R11: 0000000000000001 R12: 0000000000000002
R13: 0000000000000000 R14: ffffffff977d32e0 R15: 0000000000000000
 ? lock_acquire+0x1b4/0x4b0
 ? lock_acquire+0x1b4/0x4b0
 </NMI>


* Re: Possible deadlock in bpf queue map
  2023-09-07  5:02 Possible deadlock in bpf queue map Hsin-Wei Hung
@ 2023-09-07 10:26 ` Toke Høiland-Jørgensen
  2023-09-07 11:13   ` Kumar Kartikeya Dwivedi
  0 siblings, 1 reply; 9+ messages in thread
From: Toke Høiland-Jørgensen @ 2023-09-07 10:26 UTC (permalink / raw)
  To: Hsin-Wei Hung, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Martin KaFai Lau, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, netdev, bpf, Arnaldo Carvalho de Melo

+Arnaldo

> Hi,
>
> Our bpf fuzzer, a customized Syzkaller, triggered a lockdep warning in
> the bpf queue map in v5.15. Since queue_stack_maps.c has no major changes
> since v5.15, we think this should still exist in the latest kernel.
> The bug can be occasionally triggered, and we suspect one of the
> eBPF programs involved to be the following one. We also attached the lockdep
> warning at the end.
>
> #define DEFINE_BPF_MAP_NO_KEY(the_map, TypeOfMap, MapFlags,
> TypeOfValue, MaxEntries) \
>         struct {                                                        \
>             __uint(type, TypeOfMap);                                    \
>             __uint(map_flags, (MapFlags));                              \
>             __uint(max_entries, (MaxEntries));                          \
>             __type(value, TypeOfValue);                                 \
>         } the_map SEC(".maps");
>
> DEFINE_BPF_MAP_NO_KEY(map_0, BPF_MAP_TYPE_QUEUE, 0 | BPF_F_WRONLY,
> struct_0, 162);
> SEC("perf_event")
> int func(struct bpf_perf_event_data *ctx) {
>         char v0[96] = {};
>         uint64_t v1 = 0;
>         v1 = bpf_map_pop_elem(&map_0, v0);
>         return 163819661;
> }
>
>
> The program is attached to the following perf event.
>
> struct perf_event_attr attr_type_hw = {
>         .type = PERF_TYPE_HARDWARE,
>         .config = PERF_COUNT_HW_CPU_CYCLES,
>         .sample_freq = 50,
>         .inherit = 1,
>         .freq = 1,
> };
>
> ================================WARNING: inconsistent lock state
> 5.15.26+ #2 Not tainted
> --------------------------------
> inconsistent {INITIAL USE} -> {IN-NMI} usage.
> syz-executor.5/19749 [HC1[1]:SC0[0]:HE0:SE1] takes:
> ffff88804c9fc198 (&qs->lock){..-.}-{2:2}, at: __queue_map_get+0x31/0x250
> {INITIAL USE} state was registered at:
>   lock_acquire+0x1a3/0x4b0
>   _raw_spin_lock_irqsave+0x48/0x60
>   __queue_map_get+0x31/0x250
>   bpf_prog_577904e86c81dead_func+0x12/0x4b4
>   trace_call_bpf+0x262/0x5d0
>   perf_trace_run_bpf_submit+0x91/0x1c0
>   perf_trace_sched_switch+0x46c/0x700
>   __schedule+0x11b5/0x24a0
>   schedule+0xd4/0x270
>   futex_wait_queue_me+0x25f/0x520
>   futex_wait+0x1e0/0x5f0
>   do_futex+0x395/0x1890
>   __x64_sys_futex+0x1cb/0x480
>   do_syscall_64+0x3b/0xc0
>   entry_SYSCALL_64_after_hwframe+0x44/0xae
> irq event stamp: 13640
> hardirqs last  enabled at (13639): [<ffffffff95eb2bf4>]
> _raw_spin_unlock_irq+0x24/0x40
> hardirqs last disabled at (13640): [<ffffffff95eb2d4d>]
> _raw_spin_lock_irqsave+0x5d/0x60
> softirqs last  enabled at (13464): [<ffffffff93e26de5>] __sys_bpf+0x3e15/0x4e80
> softirqs last disabled at (13462): [<ffffffff93e26da3>] __sys_bpf+0x3dd3/0x4e80
>
> other info that might help us debug this:
>  Possible unsafe locking scenario:
>
>        CPU0
>        ----
>   lock(&qs->lock);
>   <Interrupt>
>     lock(&qs->lock);

Hmm, so that lock() uses raw_spin_lock_irqsave(), which *should* be
disabling interrupts entirely for the critical section. But I guess a
Perf hardware event can still trigger? Which seems like it would
potentially wreak havoc with lots of things, not just this queue map
function?

No idea how to protect against this, though. Hoping Arnaldo knows? :)

-Toke


* Re: Possible deadlock in bpf queue map
  2023-09-07 10:26 ` Toke Høiland-Jørgensen
@ 2023-09-07 11:13   ` Kumar Kartikeya Dwivedi
  2023-09-07 13:04     ` Toke Høiland-Jørgensen
  0 siblings, 1 reply; 9+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2023-09-07 11:13 UTC (permalink / raw)
  To: Toke Høiland-Jørgensen
  Cc: Hsin-Wei Hung, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Martin KaFai Lau, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, netdev, bpf, Arnaldo Carvalho de Melo

On Thu, 7 Sept 2023 at 12:26, Toke Høiland-Jørgensen <toke@kernel.org> wrote:
>
> +Arnaldo
>
> > Hi,
> >
> > Our bpf fuzzer, a customized Syzkaller, triggered a lockdep warning in
> > the bpf queue map in v5.15. Since queue_stack_maps.c has no major changes
> > since v5.15, we think this should still exist in the latest kernel.
> > The bug can be occasionally triggered, and we suspect one of the
> > eBPF programs involved to be the following one. We also attached the lockdep
> > warning at the end.
> >
> > #define DEFINE_BPF_MAP_NO_KEY(the_map, TypeOfMap, MapFlags,
> > TypeOfValue, MaxEntries) \
> >         struct {                                                        \
> >             __uint(type, TypeOfMap);                                    \
> >             __uint(map_flags, (MapFlags));                              \
> >             __uint(max_entries, (MaxEntries));                          \
> >             __type(value, TypeOfValue);                                 \
> >         } the_map SEC(".maps");
> >
> > DEFINE_BPF_MAP_NO_KEY(map_0, BPF_MAP_TYPE_QUEUE, 0 | BPF_F_WRONLY,
> > struct_0, 162);
> > SEC("perf_event")
> > int func(struct bpf_perf_event_data *ctx) {
> >         char v0[96] = {};
> >         uint64_t v1 = 0;
> >         v1 = bpf_map_pop_elem(&map_0, v0);
> >         return 163819661;
> > }
> >
> >
> > The program is attached to the following perf event.
> >
> > struct perf_event_attr attr_type_hw = {
> >         .type = PERF_TYPE_HARDWARE,
> >         .config = PERF_COUNT_HW_CPU_CYCLES,
> >         .sample_freq = 50,
> >         .inherit = 1,
> >         .freq = 1,
> > };
> >
> > ================================WARNING: inconsistent lock state
> > 5.15.26+ #2 Not tainted
> > --------------------------------
> > inconsistent {INITIAL USE} -> {IN-NMI} usage.
> > syz-executor.5/19749 [HC1[1]:SC0[0]:HE0:SE1] takes:
> > ffff88804c9fc198 (&qs->lock){..-.}-{2:2}, at: __queue_map_get+0x31/0x250
> > {INITIAL USE} state was registered at:
> >   lock_acquire+0x1a3/0x4b0
> >   _raw_spin_lock_irqsave+0x48/0x60
> >   __queue_map_get+0x31/0x250
> >   bpf_prog_577904e86c81dead_func+0x12/0x4b4
> >   trace_call_bpf+0x262/0x5d0
> >   perf_trace_run_bpf_submit+0x91/0x1c0
> >   perf_trace_sched_switch+0x46c/0x700
> >   __schedule+0x11b5/0x24a0
> >   schedule+0xd4/0x270
> >   futex_wait_queue_me+0x25f/0x520
> >   futex_wait+0x1e0/0x5f0
> >   do_futex+0x395/0x1890
> >   __x64_sys_futex+0x1cb/0x480
> >   do_syscall_64+0x3b/0xc0
> >   entry_SYSCALL_64_after_hwframe+0x44/0xae
> > irq event stamp: 13640
> > hardirqs last  enabled at (13639): [<ffffffff95eb2bf4>]
> > _raw_spin_unlock_irq+0x24/0x40
> > hardirqs last disabled at (13640): [<ffffffff95eb2d4d>]
> > _raw_spin_lock_irqsave+0x5d/0x60
> > softirqs last  enabled at (13464): [<ffffffff93e26de5>] __sys_bpf+0x3e15/0x4e80
> > softirqs last disabled at (13462): [<ffffffff93e26da3>] __sys_bpf+0x3dd3/0x4e80
> >
> > other info that might help us debug this:
> >  Possible unsafe locking scenario:
> >
> >        CPU0
> >        ----
> >   lock(&qs->lock);
> >   <Interrupt>
> >     lock(&qs->lock);
>
> Hmm, so that lock() uses raw_spin_lock_irqsave(), which *should* be
> disabling interrupts entirely for the critical section. But I guess a
> Perf hardware event can still trigger? Which seems like it would
> potentially wreak havoc with lots of things, not just this queue map
> function?
>
> No idea how to protect against this, though. Hoping Arnaldo knows? :)
>

The locking should probably be protected by a percpu integer counter,
incremented and decremented before and after the lock is taken,
respectively. If it is already non-zero, then -EBUSY should be
returned. It is similar to what htab_lock_bucket protects against in
hashtab.c.
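
For context, a minimal sketch of the pattern I mean (illustrative struct
and function names, not the actual htab_lock_bucket() code from
hashtab.c):

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct guarded_map {
	raw_spinlock_t lock;
	int __percpu *map_locked;	/* one counter per CPU */
};

static int guarded_map_lock(struct guarded_map *m, unsigned long *pflags)
{
	unsigned long flags;

	preempt_disable();
	/* A value other than 1 means this CPU is already inside the
	 * critical section, e.g. an NMI-context program re-entered the
	 * map operation, so taking the spinlock again would deadlock.
	 * Back off with -EBUSY instead. */
	if (unlikely(__this_cpu_inc_return(*m->map_locked) != 1)) {
		__this_cpu_dec(*m->map_locked);
		preempt_enable();
		return -EBUSY;
	}

	raw_spin_lock_irqsave(&m->lock, flags);
	*pflags = flags;
	return 0;
}

static void guarded_map_unlock(struct guarded_map *m, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&m->lock, flags);
	__this_cpu_dec(*m->map_locked);
	preempt_enable();
}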


* Re: Possible deadlock in bpf queue map
  2023-09-07 11:13   ` Kumar Kartikeya Dwivedi
@ 2023-09-07 13:04     ` Toke Høiland-Jørgensen
  2023-09-07 15:40       ` Alexei Starovoitov
  2023-09-08  0:56       ` Hou Tao
  0 siblings, 2 replies; 9+ messages in thread
From: Toke Høiland-Jørgensen @ 2023-09-07 13:04 UTC (permalink / raw)
  To: Kumar Kartikeya Dwivedi
  Cc: Hsin-Wei Hung, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Martin KaFai Lau, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, netdev, bpf, Arnaldo Carvalho de Melo

Kumar Kartikeya Dwivedi <memxor@gmail.com> writes:

> On Thu, 7 Sept 2023 at 12:26, Toke Høiland-Jørgensen <toke@kernel.org> wrote:
>>
>> +Arnaldo
>>
>> > Hi,
>> >
>> > Our bpf fuzzer, a customized Syzkaller, triggered a lockdep warning in
>> > the bpf queue map in v5.15. Since queue_stack_maps.c has no major changes
>> > since v5.15, we think this should still exist in the latest kernel.
>> > The bug can be occasionally triggered, and we suspect one of the
>> > eBPF programs involved to be the following one. We also attached the lockdep
>> > warning at the end.
>> >
>> > #define DEFINE_BPF_MAP_NO_KEY(the_map, TypeOfMap, MapFlags,
>> > TypeOfValue, MaxEntries) \
>> >         struct {                                                        \
>> >             __uint(type, TypeOfMap);                                    \
>> >             __uint(map_flags, (MapFlags));                              \
>> >             __uint(max_entries, (MaxEntries));                          \
>> >             __type(value, TypeOfValue);                                 \
>> >         } the_map SEC(".maps");
>> >
>> > DEFINE_BPF_MAP_NO_KEY(map_0, BPF_MAP_TYPE_QUEUE, 0 | BPF_F_WRONLY,
>> > struct_0, 162);
>> > SEC("perf_event")
>> > int func(struct bpf_perf_event_data *ctx) {
>> >         char v0[96] = {};
>> >         uint64_t v1 = 0;
>> >         v1 = bpf_map_pop_elem(&map_0, v0);
>> >         return 163819661;
>> > }
>> >
>> >
>> > The program is attached to the following perf event.
>> >
>> > struct perf_event_attr attr_type_hw = {
>> >         .type = PERF_TYPE_HARDWARE,
>> >         .config = PERF_COUNT_HW_CPU_CYCLES,
>> >         .sample_freq = 50,
>> >         .inherit = 1,
>> >         .freq = 1,
>> > };
>> >
>> > ================================WARNING: inconsistent lock state
>> > 5.15.26+ #2 Not tainted
>> > --------------------------------
>> > inconsistent {INITIAL USE} -> {IN-NMI} usage.
>> > syz-executor.5/19749 [HC1[1]:SC0[0]:HE0:SE1] takes:
>> > ffff88804c9fc198 (&qs->lock){..-.}-{2:2}, at: __queue_map_get+0x31/0x250
>> > {INITIAL USE} state was registered at:
>> >   lock_acquire+0x1a3/0x4b0
>> >   _raw_spin_lock_irqsave+0x48/0x60
>> >   __queue_map_get+0x31/0x250
>> >   bpf_prog_577904e86c81dead_func+0x12/0x4b4
>> >   trace_call_bpf+0x262/0x5d0
>> >   perf_trace_run_bpf_submit+0x91/0x1c0
>> >   perf_trace_sched_switch+0x46c/0x700
>> >   __schedule+0x11b5/0x24a0
>> >   schedule+0xd4/0x270
>> >   futex_wait_queue_me+0x25f/0x520
>> >   futex_wait+0x1e0/0x5f0
>> >   do_futex+0x395/0x1890
>> >   __x64_sys_futex+0x1cb/0x480
>> >   do_syscall_64+0x3b/0xc0
>> >   entry_SYSCALL_64_after_hwframe+0x44/0xae
>> > irq event stamp: 13640
>> > hardirqs last  enabled at (13639): [<ffffffff95eb2bf4>]
>> > _raw_spin_unlock_irq+0x24/0x40
>> > hardirqs last disabled at (13640): [<ffffffff95eb2d4d>]
>> > _raw_spin_lock_irqsave+0x5d/0x60
>> > softirqs last  enabled at (13464): [<ffffffff93e26de5>] __sys_bpf+0x3e15/0x4e80
>> > softirqs last disabled at (13462): [<ffffffff93e26da3>] __sys_bpf+0x3dd3/0x4e80
>> >
>> > other info that might help us debug this:
>> >  Possible unsafe locking scenario:
>> >
>> >        CPU0
>> >        ----
>> >   lock(&qs->lock);
>> >   <Interrupt>
>> >     lock(&qs->lock);
>>
>> Hmm, so that lock() uses raw_spin_lock_irqsave(), which *should* be
>> disabling interrupts entirely for the critical section. But I guess a
>> Perf hardware event can still trigger? Which seems like it would
>> potentially wreak havoc with lots of things, not just this queue map
>> function?
>>
>> No idea how to protect against this, though. Hoping Arnaldo knows? :)
>>
>
> The locking should probably be protected by a percpu integer counter,
> incremented and decremented before and after the lock is taken,
> respectively. If it is already non-zero, then -EBUSY should be
> returned. It is similar to what htab_lock_bucket protects against in
> hashtab.c.

Ah, neat! Okay, seems straight-forward enough to replicate. Hsin, could
you please check if the patch below gets rid of the splat?

-Toke


diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 8d2ddcb7566b..f96945311eec 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -16,6 +16,7 @@
 struct bpf_queue_stack {
 	struct bpf_map map;
 	raw_spinlock_t lock;
+	int __percpu *map_locked;
 	u32 head, tail;
 	u32 size; /* max_entries + 1 */
 
@@ -66,6 +67,7 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_queue_stack *qs;
 	u64 size, queue_size;
+	int err = -ENOMEM;
 
 	size = (u64) attr->max_entries + 1;
 	queue_size = sizeof(*qs) + size * attr->value_size;
@@ -80,7 +82,18 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 
 	raw_spin_lock_init(&qs->lock);
 
+	qs->map_locked = bpf_map_alloc_percpu(&qs->map,
+					      sizeof(*qs->map_locked),
+					      sizeof(*qs->map_locked),
+					      GFP_USER);
+	if (!qs->map_locked)
+		goto free_map;
+
 	return &qs->map;
+
+free_map:
+	bpf_map_area_free(qs);
+	return ERR_PTR(err);
 }
 
 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
@@ -88,9 +101,37 @@ static void queue_stack_map_free(struct bpf_map *map)
 {
 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
 
+	free_percpu(qs->map_locked);
 	bpf_map_area_free(qs);
 }
 
+static inline int queue_stack_map_lock(struct bpf_queue_stack *qs,
+				       unsigned long *pflags)
+{
+	unsigned long flags;
+
+	preempt_disable();
+	if (unlikely(__this_cpu_inc_return(*qs->map_locked) != 1)) {
+		__this_cpu_dec(*qs->map_locked);
+		preempt_enable();
+		return -EBUSY;
+	}
+
+	raw_spin_lock_irqsave(&qs->lock, flags);
+	*pflags = flags;
+
+	return 0;
+}
+
+
+static inline void queue_stack_map_unlock(struct bpf_queue_stack *qs,
+					  unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&qs->lock, flags);
+	__this_cpu_dec(*qs->map_locked);
+	preempt_enable();
+}
+
 static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
 {
 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
@@ -98,7 +139,9 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
 	int err = 0;
 	void *ptr;
 
-	raw_spin_lock_irqsave(&qs->lock, flags);
+	err = queue_stack_map_lock(qs, &flags);
+	if (err)
+		return err;
 
 	if (queue_stack_map_is_empty(qs)) {
 		memset(value, 0, qs->map.value_size);
@@ -115,7 +158,7 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
 	}
 
 out:
-	raw_spin_unlock_irqrestore(&qs->lock, flags);
+	queue_stack_map_unlock(qs, flags);
 	return err;
 }
 
@@ -128,7 +171,9 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
 	void *ptr;
 	u32 index;
 
-	raw_spin_lock_irqsave(&qs->lock, flags);
+	err = queue_stack_map_lock(qs, &flags);
+	if (err)
+		return err;
 
 	if (queue_stack_map_is_empty(qs)) {
 		memset(value, 0, qs->map.value_size);
@@ -147,7 +192,7 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
 		qs->head = index;
 
 out:
-	raw_spin_unlock_irqrestore(&qs->lock, flags);
+	queue_stack_map_unlock(qs, flags);
 	return err;
 }
 
@@ -193,7 +238,9 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
 	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&qs->lock, irq_flags);
+	err = queue_stack_map_lock(qs, &irq_flags);
+	if (err)
+		return err;
 
 	if (queue_stack_map_is_full(qs)) {
 		if (!replace) {
@@ -212,7 +259,7 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
 		qs->head = 0;
 
 out:
-	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
+	queue_stack_map_unlock(qs, irq_flags);
 	return err;
 }
 


* Re: Possible deadlock in bpf queue map
  2023-09-07 13:04     ` Toke Høiland-Jørgensen
@ 2023-09-07 15:40       ` Alexei Starovoitov
  2023-09-07 16:05         ` Toke Høiland-Jørgensen
  2023-09-08  0:56       ` Hou Tao
  1 sibling, 1 reply; 9+ messages in thread
From: Alexei Starovoitov @ 2023-09-07 15:40 UTC (permalink / raw)
  To: Toke Høiland-Jørgensen
  Cc: Kumar Kartikeya Dwivedi, Hsin-Wei Hung, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau, Song Liu,
	Yonghong Song, John Fastabend, KP Singh, Network Development, bpf,
	Arnaldo Carvalho de Melo

On Thu, Sep 7, 2023 at 6:04 AM Toke Høiland-Jørgensen <toke@kernel.org> wrote:
>
> Kumar Kartikeya Dwivedi <memxor@gmail.com> writes:
>
> > On Thu, 7 Sept 2023 at 12:26, Toke Høiland-Jørgensen <toke@kernel.org> wrote:
> >>
> >> +Arnaldo
> >>
> >> > Hi,
> >> >
> >> > Our bpf fuzzer, a customized Syzkaller, triggered a lockdep warning in
> >> > the bpf queue map in v5.15. Since queue_stack_maps.c has no major changes
> >> > since v5.15, we think this should still exist in the latest kernel.
> >> > The bug can be occasionally triggered, and we suspect one of the
> >> > eBPF programs involved to be the following one. We also attached the lockdep
> >> > warning at the end.
> >> >
> >> > #define DEFINE_BPF_MAP_NO_KEY(the_map, TypeOfMap, MapFlags,
> >> > TypeOfValue, MaxEntries) \
> >> >         struct {                                                        \
> >> >             __uint(type, TypeOfMap);                                    \
> >> >             __uint(map_flags, (MapFlags));                              \
> >> >             __uint(max_entries, (MaxEntries));                          \
> >> >             __type(value, TypeOfValue);                                 \
> >> >         } the_map SEC(".maps");
> >> >
> >> > DEFINE_BPF_MAP_NO_KEY(map_0, BPF_MAP_TYPE_QUEUE, 0 | BPF_F_WRONLY,
> >> > struct_0, 162);
> >> > SEC("perf_event")
> >> > int func(struct bpf_perf_event_data *ctx) {
> >> >         char v0[96] = {};
> >> >         uint64_t v1 = 0;
> >> >         v1 = bpf_map_pop_elem(&map_0, v0);
> >> >         return 163819661;
> >> > }
> >> >
> >> >
> >> > The program is attached to the following perf event.
> >> >
> >> > struct perf_event_attr attr_type_hw = {
> >> >         .type = PERF_TYPE_HARDWARE,
> >> >         .config = PERF_COUNT_HW_CPU_CYCLES,
> >> >         .sample_freq = 50,
> >> >         .inherit = 1,
> >> >         .freq = 1,
> >> > };
> >> >
> >> > ================================WARNING: inconsistent lock state
> >> > 5.15.26+ #2 Not tainted
> >> > --------------------------------
> >> > inconsistent {INITIAL USE} -> {IN-NMI} usage.
> >> > syz-executor.5/19749 [HC1[1]:SC0[0]:HE0:SE1] takes:
> >> > ffff88804c9fc198 (&qs->lock){..-.}-{2:2}, at: __queue_map_get+0x31/0x250
> >> > {INITIAL USE} state was registered at:
> >> >   lock_acquire+0x1a3/0x4b0
> >> >   _raw_spin_lock_irqsave+0x48/0x60
> >> >   __queue_map_get+0x31/0x250
> >> >   bpf_prog_577904e86c81dead_func+0x12/0x4b4
> >> >   trace_call_bpf+0x262/0x5d0
> >> >   perf_trace_run_bpf_submit+0x91/0x1c0
> >> >   perf_trace_sched_switch+0x46c/0x700
> >> >   __schedule+0x11b5/0x24a0
> >> >   schedule+0xd4/0x270
> >> >   futex_wait_queue_me+0x25f/0x520
> >> >   futex_wait+0x1e0/0x5f0
> >> >   do_futex+0x395/0x1890
> >> >   __x64_sys_futex+0x1cb/0x480
> >> >   do_syscall_64+0x3b/0xc0
> >> >   entry_SYSCALL_64_after_hwframe+0x44/0xae
> >> > irq event stamp: 13640
> >> > hardirqs last  enabled at (13639): [<ffffffff95eb2bf4>]
> >> > _raw_spin_unlock_irq+0x24/0x40
> >> > hardirqs last disabled at (13640): [<ffffffff95eb2d4d>]
> >> > _raw_spin_lock_irqsave+0x5d/0x60
> >> > softirqs last  enabled at (13464): [<ffffffff93e26de5>] __sys_bpf+0x3e15/0x4e80
> >> > softirqs last disabled at (13462): [<ffffffff93e26da3>] __sys_bpf+0x3dd3/0x4e80
> >> >
> >> > other info that might help us debug this:
> >> >  Possible unsafe locking scenario:
> >> >
> >> >        CPU0
> >> >        ----
> >> >   lock(&qs->lock);
> >> >   <Interrupt>
> >> >     lock(&qs->lock);
> >>
> >> Hmm, so that lock() uses raw_spin_lock_irqsave(), which *should* be
> >> disabling interrupts entirely for the critical section. But I guess a
> >> Perf hardware event can still trigger? Which seems like it would
> >> potentially wreak havoc with lots of things, not just this queue map
> >> function?
> >>
> >> No idea how to protect against this, though. Hoping Arnaldo knows? :)
> >>
> >
> > The locking should probably be protected by a percpu integer counter,
> > incremented and decremented before and after the lock is taken,
> > respectively. If it is already non-zero, then -EBUSY should be
> > returned. It is similar to what htab_lock_bucket protects against in
> > hashtab.c.
>
> Ah, neat! Okay, seems straight-forward enough to replicate. Hsin, could
> you please check if the patch below gets rid of the splat?

Instead of adding all this complexity for a map that is so rarely used,
it's easier to disallow it for perf_event prog types.
Or add a single if (in_nmi()) return -EBUSY.


* Re: Possible deadlock in bpf queue map
  2023-09-07 15:40       ` Alexei Starovoitov
@ 2023-09-07 16:05         ` Toke Høiland-Jørgensen
  2023-09-07 16:08           ` Alexei Starovoitov
  0 siblings, 1 reply; 9+ messages in thread
From: Toke Høiland-Jørgensen @ 2023-09-07 16:05 UTC (permalink / raw)
  To: Alexei Starovoitov
  Cc: Kumar Kartikeya Dwivedi, Hsin-Wei Hung, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau, Song Liu,
	Yonghong Song, John Fastabend, KP Singh, Network Development, bpf,
	Arnaldo Carvalho de Melo

Alexei Starovoitov <alexei.starovoitov@gmail.com> writes:

> On Thu, Sep 7, 2023 at 6:04 AM Toke Høiland-Jørgensen <toke@kernel.org> wrote:
>>
>> Kumar Kartikeya Dwivedi <memxor@gmail.com> writes:
>>
>> > On Thu, 7 Sept 2023 at 12:26, Toke Høiland-Jørgensen <toke@kernel.org> wrote:
>> >>
>> >> +Arnaldo
>> >>
>> >> > Hi,
>> >> >
>> >> > Our bpf fuzzer, a customized Syzkaller, triggered a lockdep warning in
>> >> > the bpf queue map in v5.15. Since queue_stack_maps.c has no major changes
>> >> > since v5.15, we think this should still exist in the latest kernel.
>> >> > The bug can be occasionally triggered, and we suspect one of the
>> >> > eBPF programs involved to be the following one. We also attached the lockdep
>> >> > warning at the end.
>> >> >
>> >> > #define DEFINE_BPF_MAP_NO_KEY(the_map, TypeOfMap, MapFlags,
>> >> > TypeOfValue, MaxEntries) \
>> >> >         struct {                                                        \
>> >> >             __uint(type, TypeOfMap);                                    \
>> >> >             __uint(map_flags, (MapFlags));                              \
>> >> >             __uint(max_entries, (MaxEntries));                          \
>> >> >             __type(value, TypeOfValue);                                 \
>> >> >         } the_map SEC(".maps");
>> >> >
>> >> > DEFINE_BPF_MAP_NO_KEY(map_0, BPF_MAP_TYPE_QUEUE, 0 | BPF_F_WRONLY,
>> >> > struct_0, 162);
>> >> > SEC("perf_event")
>> >> > int func(struct bpf_perf_event_data *ctx) {
>> >> >         char v0[96] = {};
>> >> >         uint64_t v1 = 0;
>> >> >         v1 = bpf_map_pop_elem(&map_0, v0);
>> >> >         return 163819661;
>> >> > }
>> >> >
>> >> >
>> >> > The program is attached to the following perf event.
>> >> >
>> >> > struct perf_event_attr attr_type_hw = {
>> >> >         .type = PERF_TYPE_HARDWARE,
>> >> >         .config = PERF_COUNT_HW_CPU_CYCLES,
>> >> >         .sample_freq = 50,
>> >> >         .inherit = 1,
>> >> >         .freq = 1,
>> >> > };
>> >> >
>> >> > ================================WARNING: inconsistent lock state
>> >> > 5.15.26+ #2 Not tainted
>> >> > --------------------------------
>> >> > inconsistent {INITIAL USE} -> {IN-NMI} usage.
>> >> > syz-executor.5/19749 [HC1[1]:SC0[0]:HE0:SE1] takes:
>> >> > ffff88804c9fc198 (&qs->lock){..-.}-{2:2}, at: __queue_map_get+0x31/0x250
>> >> > {INITIAL USE} state was registered at:
>> >> >   lock_acquire+0x1a3/0x4b0
>> >> >   _raw_spin_lock_irqsave+0x48/0x60
>> >> >   __queue_map_get+0x31/0x250
>> >> >   bpf_prog_577904e86c81dead_func+0x12/0x4b4
>> >> >   trace_call_bpf+0x262/0x5d0
>> >> >   perf_trace_run_bpf_submit+0x91/0x1c0
>> >> >   perf_trace_sched_switch+0x46c/0x700
>> >> >   __schedule+0x11b5/0x24a0
>> >> >   schedule+0xd4/0x270
>> >> >   futex_wait_queue_me+0x25f/0x520
>> >> >   futex_wait+0x1e0/0x5f0
>> >> >   do_futex+0x395/0x1890
>> >> >   __x64_sys_futex+0x1cb/0x480
>> >> >   do_syscall_64+0x3b/0xc0
>> >> >   entry_SYSCALL_64_after_hwframe+0x44/0xae
>> >> > irq event stamp: 13640
>> >> > hardirqs last  enabled at (13639): [<ffffffff95eb2bf4>]
>> >> > _raw_spin_unlock_irq+0x24/0x40
>> >> > hardirqs last disabled at (13640): [<ffffffff95eb2d4d>]
>> >> > _raw_spin_lock_irqsave+0x5d/0x60
>> >> > softirqs last  enabled at (13464): [<ffffffff93e26de5>] __sys_bpf+0x3e15/0x4e80
>> >> > softirqs last disabled at (13462): [<ffffffff93e26da3>] __sys_bpf+0x3dd3/0x4e80
>> >> >
>> >> > other info that might help us debug this:
>> >> >  Possible unsafe locking scenario:
>> >> >
>> >> >        CPU0
>> >> >        ----
>> >> >   lock(&qs->lock);
>> >> >   <Interrupt>
>> >> >     lock(&qs->lock);
>> >>
>> >> Hmm, so that lock() uses raw_spin_lock_irqsave(), which *should* be
>> >> disabling interrupts entirely for the critical section. But I guess a
>> >> Perf hardware event can still trigger? Which seems like it would
>> >> potentially wreak havoc with lots of things, not just this queue map
>> >> function?
>> >>
>> >> No idea how to protect against this, though. Hoping Arnaldo knows? :)
>> >>
>> >
>> > The locking should probably be protected by a percpu integer counter,
>> > incremented and decremented before and after the lock is taken,
>> > respectively. If it is already non-zero, then -EBUSY should be
>> > returned. It is similar to what htab_lock_bucket protects against in
>> > hashtab.c.
>>
>> Ah, neat! Okay, seems straight-forward enough to replicate. Hsin, could
>> you please check if the patch below gets rid of the splat?
>
> Instead of adding all this complexity for the map that is so rarely used
> it's easier to disallow it perf_event prog types.
> Or add a single if (in_nmi()) return -EBUSY.

Heh, I was actually thinking the "nmi-safe lock" thing might be
something that could be neatly encapsulated into the lock library
helpers. But OK, so you mean just something like the below, then?

I'll send a proper patch for that later if no one objects (or beats me
to it) :)

-Toke

diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 8d2ddcb7566b..138525424745 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -98,6 +98,9 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
        int err = 0;
        void *ptr;
 
+       if (in_nmi())
+               return -EBUSY;
+
        raw_spin_lock_irqsave(&qs->lock, flags);
 
        if (queue_stack_map_is_empty(qs)) {
@@ -128,6 +131,9 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
        void *ptr;
        u32 index;
 
+       if (in_nmi())
+               return -EBUSY;
+
        raw_spin_lock_irqsave(&qs->lock, flags);
 
        if (queue_stack_map_is_empty(qs)) {
@@ -193,6 +199,9 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
        if (flags & BPF_NOEXIST || flags > BPF_EXIST)
                return -EINVAL;
 
+       if (in_nmi())
+               return -EBUSY;
+
        raw_spin_lock_irqsave(&qs->lock, irq_flags);
 
        if (queue_stack_map_is_full(qs)) {


* Re: Possible deadlock in bpf queue map
  2023-09-07 16:05         ` Toke Høiland-Jørgensen
@ 2023-09-07 16:08           ` Alexei Starovoitov
  2023-09-08  5:29             ` Hsin-Wei Hung
  0 siblings, 1 reply; 9+ messages in thread
From: Alexei Starovoitov @ 2023-09-07 16:08 UTC (permalink / raw)
  To: Toke Høiland-Jørgensen
  Cc: Kumar Kartikeya Dwivedi, Hsin-Wei Hung, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau, Song Liu,
	Yonghong Song, John Fastabend, KP Singh, Network Development, bpf,
	Arnaldo Carvalho de Melo

On Thu, Sep 7, 2023 at 9:05 AM Toke Høiland-Jørgensen <toke@kernel.org> wrote:
>
> Alexei Starovoitov <alexei.starovoitov@gmail.com> writes:
>
> > On Thu, Sep 7, 2023 at 6:04 AM Toke Høiland-Jørgensen <toke@kernel.org> wrote:
> >>
> >> Kumar Kartikeya Dwivedi <memxor@gmail.com> writes:
> >>
> >> > On Thu, 7 Sept 2023 at 12:26, Toke Høiland-Jørgensen <toke@kernel.org> wrote:
> >> >>
> >> >> +Arnaldo
> >> >>
> >> >> > Hi,
> >> >> >
> >> >> > Our bpf fuzzer, a customized Syzkaller, triggered a lockdep warning in
> >> >> > the bpf queue map in v5.15. Since queue_stack_maps.c has no major changes
> >> >> > since v5.15, we think this should still exist in the latest kernel.
> >> >> > The bug can be occasionally triggered, and we suspect one of the
> >> >> > eBPF programs involved to be the following one. We also attached the lockdep
> >> >> > warning at the end.
> >> >> >
> >> >> > #define DEFINE_BPF_MAP_NO_KEY(the_map, TypeOfMap, MapFlags,
> >> >> > TypeOfValue, MaxEntries) \
> >> >> >         struct {                                                        \
> >> >> >             __uint(type, TypeOfMap);                                    \
> >> >> >             __uint(map_flags, (MapFlags));                              \
> >> >> >             __uint(max_entries, (MaxEntries));                          \
> >> >> >             __type(value, TypeOfValue);                                 \
> >> >> >         } the_map SEC(".maps");
> >> >> >
> >> >> > DEFINE_BPF_MAP_NO_KEY(map_0, BPF_MAP_TYPE_QUEUE, 0 | BPF_F_WRONLY,
> >> >> > struct_0, 162);
> >> >> > SEC("perf_event")
> >> >> > int func(struct bpf_perf_event_data *ctx) {
> >> >> >         char v0[96] = {};
> >> >> >         uint64_t v1 = 0;
> >> >> >         v1 = bpf_map_pop_elem(&map_0, v0);
> >> >> >         return 163819661;
> >> >> > }
> >> >> >
> >> >> >
> >> >> > The program is attached to the following perf event.
> >> >> >
> >> >> > struct perf_event_attr attr_type_hw = {
> >> >> >         .type = PERF_TYPE_HARDWARE,
> >> >> >         .config = PERF_COUNT_HW_CPU_CYCLES,
> >> >> >         .sample_freq = 50,
> >> >> >         .inherit = 1,
> >> >> >         .freq = 1,
> >> >> > };
> >> >> >
> >> >> > ================================WARNING: inconsistent lock state
> >> >> > 5.15.26+ #2 Not tainted
> >> >> > --------------------------------
> >> >> > inconsistent {INITIAL USE} -> {IN-NMI} usage.
> >> >> > syz-executor.5/19749 [HC1[1]:SC0[0]:HE0:SE1] takes:
> >> >> > ffff88804c9fc198 (&qs->lock){..-.}-{2:2}, at: __queue_map_get+0x31/0x250
> >> >> > {INITIAL USE} state was registered at:
> >> >> >   lock_acquire+0x1a3/0x4b0
> >> >> >   _raw_spin_lock_irqsave+0x48/0x60
> >> >> >   __queue_map_get+0x31/0x250
> >> >> >   bpf_prog_577904e86c81dead_func+0x12/0x4b4
> >> >> >   trace_call_bpf+0x262/0x5d0
> >> >> >   perf_trace_run_bpf_submit+0x91/0x1c0
> >> >> >   perf_trace_sched_switch+0x46c/0x700
> >> >> >   __schedule+0x11b5/0x24a0
> >> >> >   schedule+0xd4/0x270
> >> >> >   futex_wait_queue_me+0x25f/0x520
> >> >> >   futex_wait+0x1e0/0x5f0
> >> >> >   do_futex+0x395/0x1890
> >> >> >   __x64_sys_futex+0x1cb/0x480
> >> >> >   do_syscall_64+0x3b/0xc0
> >> >> >   entry_SYSCALL_64_after_hwframe+0x44/0xae
> >> >> > irq event stamp: 13640
> >> >> > hardirqs last  enabled at (13639): [<ffffffff95eb2bf4>]
> >> >> > _raw_spin_unlock_irq+0x24/0x40
> >> >> > hardirqs last disabled at (13640): [<ffffffff95eb2d4d>]
> >> >> > _raw_spin_lock_irqsave+0x5d/0x60
> >> >> > softirqs last  enabled at (13464): [<ffffffff93e26de5>] __sys_bpf+0x3e15/0x4e80
> >> >> > softirqs last disabled at (13462): [<ffffffff93e26da3>] __sys_bpf+0x3dd3/0x4e80
> >> >> >
> >> >> > other info that might help us debug this:
> >> >> >  Possible unsafe locking scenario:
> >> >> >
> >> >> >        CPU0
> >> >> >        ----
> >> >> >   lock(&qs->lock);
> >> >> >   <Interrupt>
> >> >> >     lock(&qs->lock);
> >> >>
> >> >> Hmm, so that lock() uses raw_spin_lock_irqsave(), which *should* be
> >> >> disabling interrupts entirely for the critical section. But I guess a
> >> >> Perf hardware event can still trigger? Which seems like it would
> >> >> potentially wreak havoc with lots of things, not just this queue map
> >> >> function?
> >> >>
> >> >> No idea how to protect against this, though. Hoping Arnaldo knows? :)
> >> >>
> >> >
> >> > The locking should probably be protected by a percpu integer counter,
> >> > incremented and decremented before and after the lock is taken,
> >> > respectively. If it is already non-zero, then -EBUSY should be
> >> > returned. It is similar to what htab_lock_bucket protects against in
> >> > hashtab.c.
> >>
> >> Ah, neat! Okay, seems straight-forward enough to replicate. Hsin, could
> >> you please check if the patch below gets rid of the splat?
> >
> > Instead of adding all this complexity for the map that is so rarely used
> > it's easier to disallow it perf_event prog types.
> > Or add a single if (in_nmi()) return -EBUSY.
>
> Heh, I was actually thinking the "nmi-safe lock" thing might be
> something that could be neatly encapsulated into the lock library
> helpers. But OK, so you mean just something like the below, then?

Yep.
In bpf timers we do:
        if (in_nmi())
                return -EOPNOTSUPP;
while in ringbuf we have:
        if (in_nmi()) {
                if (!spin_trylock_irqsave(&rb->spinlock, flags))
                        return NULL;
        } else {
                spin_lock_irqsave(&rb->spinlock, flags);
        }

I think both options are fine to use with queue/stack map.
trylock approach is probably less drastic.

> I'll send a proper patch for that later if no one objects (or beats me
> to it) :)
>
> -Toke
>
> diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
> index 8d2ddcb7566b..138525424745 100644
> --- a/kernel/bpf/queue_stack_maps.c
> +++ b/kernel/bpf/queue_stack_maps.c
> @@ -98,6 +98,9 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
>         int err = 0;
>         void *ptr;
>
> +       if (in_nmi())
> +               return -EBUSY;
> +
>         raw_spin_lock_irqsave(&qs->lock, flags);
>
>         if (queue_stack_map_is_empty(qs)) {
> @@ -128,6 +131,9 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
>         void *ptr;
>         u32 index;
>
> +       if (in_nmi())
> +               return -EBUSY;
> +
>         raw_spin_lock_irqsave(&qs->lock, flags);
>
>         if (queue_stack_map_is_empty(qs)) {
> @@ -193,6 +199,9 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
>         if (flags & BPF_NOEXIST || flags > BPF_EXIST)
>                 return -EINVAL;
>
> +       if (in_nmi())
> +               return -EBUSY;
> +
>         raw_spin_lock_irqsave(&qs->lock, irq_flags);
>
>         if (queue_stack_map_is_full(qs)) {


* Re: Possible deadlock in bpf queue map
  2023-09-07 13:04     ` Toke Høiland-Jørgensen
  2023-09-07 15:40       ` Alexei Starovoitov
@ 2023-09-08  0:56       ` Hou Tao
  1 sibling, 0 replies; 9+ messages in thread
From: Hou Tao @ 2023-09-08  0:56 UTC (permalink / raw)
  To: Toke Høiland-Jørgensen, Kumar Kartikeya Dwivedi
  Cc: Hsin-Wei Hung, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Martin KaFai Lau, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, netdev, bpf, Arnaldo Carvalho de Melo

Hi,

On 9/7/2023 9:04 PM, Toke Høiland-Jørgensen wrote:
> Kumar Kartikeya Dwivedi <memxor@gmail.com> writes:
>
>> On Thu, 7 Sept 2023 at 12:26, Toke Høiland-Jørgensen <toke@kernel.org> wrote:
>>> +Arnaldo
>>>
>>>> Hi,
>>>>
>>>> Our bpf fuzzer, a customized Syzkaller, triggered a lockdep warning in
>>>> the bpf queue map in v5.15. Since queue_stack_maps.c has no major changes
>>>> since v5.15, we think this should still exist in the latest kernel.
>>>> The bug can be occasionally triggered, and we suspect one of the
>>>> eBPF programs involved to be the following one. We also attached the lockdep
>>>> warning at the end.
>>>>
>>>> #define DEFINE_BPF_MAP_NO_KEY(the_map, TypeOfMap, MapFlags,
>>>> TypeOfValue, MaxEntries) \
>>>>         struct {                                                        \
>>>>             __uint(type, TypeOfMap);                                    \
>>>>             __uint(map_flags, (MapFlags));                              \
>>>>             __uint(max_entries, (MaxEntries));                          \
>>>>             __type(value, TypeOfValue);                                 \
>>>>         } the_map SEC(".maps");
>>>>
>>>> DEFINE_BPF_MAP_NO_KEY(map_0, BPF_MAP_TYPE_QUEUE, 0 | BPF_F_WRONLY,
>>>> struct_0, 162);
>>>> SEC("perf_event")
>>>> int func(struct bpf_perf_event_data *ctx) {
>>>>         char v0[96] = {};
>>>>         uint64_t v1 = 0;
>>>>         v1 = bpf_map_pop_elem(&map_0, v0);
>>>>         return 163819661;
>>>> }
>>>>
>>>>
>>>> The program is attached to the following perf event.
>>>>
>>>> struct perf_event_attr attr_type_hw = {
>>>>         .type = PERF_TYPE_HARDWARE,
>>>>         .config = PERF_COUNT_HW_CPU_CYCLES,
>>>>         .sample_freq = 50,
>>>>         .inherit = 1,
>>>>         .freq = 1,
>>>> };
>>>>
>>>> ================================WARNING: inconsistent lock state
>>>> 5.15.26+ #2 Not tainted
>>>> --------------------------------
>>>> inconsistent {INITIAL USE} -> {IN-NMI} usage.
>>>> syz-executor.5/19749 [HC1[1]:SC0[0]:HE0:SE1] takes:
>>>> ffff88804c9fc198 (&qs->lock){..-.}-{2:2}, at: __queue_map_get+0x31/0x250
>>>> {INITIAL USE} state was registered at:
>>>>   lock_acquire+0x1a3/0x4b0
>>>>   _raw_spin_lock_irqsave+0x48/0x60
>>>>   __queue_map_get+0x31/0x250
>>>>   bpf_prog_577904e86c81dead_func+0x12/0x4b4
>>>>   trace_call_bpf+0x262/0x5d0
>>>>   perf_trace_run_bpf_submit+0x91/0x1c0
>>>>   perf_trace_sched_switch+0x46c/0x700
>>>>   __schedule+0x11b5/0x24a0
>>>>   schedule+0xd4/0x270
>>>>   futex_wait_queue_me+0x25f/0x520
>>>>   futex_wait+0x1e0/0x5f0
>>>>   do_futex+0x395/0x1890
>>>>   __x64_sys_futex+0x1cb/0x480
>>>>   do_syscall_64+0x3b/0xc0
>>>>   entry_SYSCALL_64_after_hwframe+0x44/0xae
>>>> irq event stamp: 13640
>>>> hardirqs last  enabled at (13639): [<ffffffff95eb2bf4>]
>>>> _raw_spin_unlock_irq+0x24/0x40
>>>> hardirqs last disabled at (13640): [<ffffffff95eb2d4d>]
>>>> _raw_spin_lock_irqsave+0x5d/0x60
>>>> softirqs last  enabled at (13464): [<ffffffff93e26de5>] __sys_bpf+0x3e15/0x4e80
>>>> softirqs last disabled at (13462): [<ffffffff93e26da3>] __sys_bpf+0x3dd3/0x4e80
>>>>
>>>> other info that might help us debug this:
>>>>  Possible unsafe locking scenario:
>>>>
>>>>        CPU0
>>>>        ----
>>>>   lock(&qs->lock);
>>>>   <Interrupt>
>>>>     lock(&qs->lock);
>>> Hmm, so that lock() uses raw_spin_lock_irqsave(), which *should* be
>>> disabling interrupts entirely for the critical section. But I guess a
>>> Perf hardware event can still trigger? Which seems like it would
>>> potentially wreak havoc with lots of things, not just this queue map
>>> function?
>>>
>>> No idea how to protect against this, though. Hoping Arnaldo knows? :)
>>>

It seems my reply from last night was dropped by the mailing list.

>> The locking should probably be protected by a percpu integer counter,
>> incremented and decremented before and after the lock is taken,
>> respectively. If it is already non-zero, then -EBUSY should be
>> returned. It is similar to what htab_lock_bucket protects against in
>> hashtab.c.
> Ah, neat! Okay, seems straight-forward enough to replicate. Hsin, could
> you please check if the patch below gets rid of the splat?

The fixes would prevent the potential deadlock, but I think the lockdep
warning will still be there, because lockdep considers it safe to call
only trylock in NMI context. So using raw_spin_trylock() for the NMI
context will fix both the potential deadlock and the lockdep splat.

>
> -Toke
>
>
> diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
> index 8d2ddcb7566b..f96945311eec 100644
> --- a/kernel/bpf/queue_stack_maps.c
> +++ b/kernel/bpf/queue_stack_maps.c
> @@ -16,6 +16,7 @@
>  struct bpf_queue_stack {
>  	struct bpf_map map;
>  	raw_spinlock_t lock;
> +	int __percpu *map_locked;
>  	u32 head, tail;
>  	u32 size; /* max_entries + 1 */
>  
> @@ -66,6 +67,7 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
>  	int numa_node = bpf_map_attr_numa_node(attr);
>  	struct bpf_queue_stack *qs;
>  	u64 size, queue_size;
> +	int err = -ENOMEM;
>  
>  	size = (u64) attr->max_entries + 1;
>  	queue_size = sizeof(*qs) + size * attr->value_size;
> @@ -80,7 +82,18 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
>  
>  	raw_spin_lock_init(&qs->lock);
>  
> +	qs->map_locked = bpf_map_alloc_percpu(&qs->map,
> +					      sizeof(*qs->map_locked),
> +					      sizeof(*qs->map_locked),
> +					      GFP_USER);
> +	if (!qs->map_locked)
> +		goto free_map;
> +
>  	return &qs->map;
> +
> +free_map:
> +	bpf_map_area_free(qs);
> +	return ERR_PTR(err);
>  }
>  
>  /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
> @@ -88,9 +101,37 @@ static void queue_stack_map_free(struct bpf_map *map)
>  {
>  	struct bpf_queue_stack *qs = bpf_queue_stack(map);
>  
> +	free_percpu(qs->map_locked);
>  	bpf_map_area_free(qs);
>  }
>  
> +static inline int queue_stack_map_lock(struct bpf_queue_stack *qs,
> +				       unsigned long *pflags)
> +{
> +	unsigned long flags;
> +
> +	preempt_disable();
> +	if (unlikely(__this_cpu_inc_return(*qs->map_locked) != 1)) {
> +		__this_cpu_dec(*qs->map_locked);
> +		preempt_enable();
> +		return -EBUSY;
> +	}
> +
> +	raw_spin_lock_irqsave(&qs->lock, flags);
> +	*pflags = flags;
> +
> +	return 0;
> +}
> +
> +
> +static inline void queue_stack_map_unlock(struct bpf_queue_stack *qs,
> +					  unsigned long flags)
> +{
> +	raw_spin_unlock_irqrestore(&qs->lock, flags);
> +	__this_cpu_dec(*qs->map_locked);
> +	preempt_enable();
> +}
> +
>  static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
>  {
>  	struct bpf_queue_stack *qs = bpf_queue_stack(map);
> @@ -98,7 +139,9 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
>  	int err = 0;
>  	void *ptr;
>  
> -	raw_spin_lock_irqsave(&qs->lock, flags);
> +	err = queue_stack_map_lock(qs, &flags);
> +	if (err)
> +		return err;
>  
>  	if (queue_stack_map_is_empty(qs)) {
>  		memset(value, 0, qs->map.value_size);
> @@ -115,7 +158,7 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
>  	}
>  
>  out:
> -	raw_spin_unlock_irqrestore(&qs->lock, flags);
> +	queue_stack_map_unlock(qs, flags);
>  	return err;
>  }
>  
> @@ -128,7 +171,9 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
>  	void *ptr;
>  	u32 index;
>  
> -	raw_spin_lock_irqsave(&qs->lock, flags);
> +	err = queue_stack_map_lock(qs, &flags);
> +	if (err)
> +		return err;
>  
>  	if (queue_stack_map_is_empty(qs)) {
>  		memset(value, 0, qs->map.value_size);
> @@ -147,7 +192,7 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
>  		qs->head = index;
>  
>  out:
> -	raw_spin_unlock_irqrestore(&qs->lock, flags);
> +	queue_stack_map_unlock(qs, flags);
>  	return err;
>  }
>  
> @@ -193,7 +238,9 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
>  	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
>  		return -EINVAL;
>  
> -	raw_spin_lock_irqsave(&qs->lock, irq_flags);
> +	err = queue_stack_map_lock(qs, &irq_flags);
> +	if (err)
> +		return err;
>  
>  	if (queue_stack_map_is_full(qs)) {
>  		if (!replace) {
> @@ -212,7 +259,7 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
>  		qs->head = 0;
>  
>  out:
> -	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
> +	queue_stack_map_unlock(qs, irq_flags);
>  	return err;
>  }
>  
>
> .



* Re: Possible deadlock in bpf queue map
  2023-09-07 16:08           ` Alexei Starovoitov
@ 2023-09-08  5:29             ` Hsin-Wei Hung
  0 siblings, 0 replies; 9+ messages in thread
From: Hsin-Wei Hung @ 2023-09-08  5:29 UTC (permalink / raw)
  To: Alexei Starovoitov
  Cc: Toke Høiland-Jørgensen, Kumar Kartikeya Dwivedi,
	Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, Network Development, bpf, Arnaldo Carvalho de Melo

On Thu, Sep 7, 2023 at 9:08 AM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Thu, Sep 7, 2023 at 9:05 AM Toke Høiland-Jørgensen <toke@kernel.org> wrote:
> >
> > Alexei Starovoitov <alexei.starovoitov@gmail.com> writes:
> >
> > > On Thu, Sep 7, 2023 at 6:04 AM Toke Høiland-Jørgensen <toke@kernel.org> wrote:
> > >>
> > >> Kumar Kartikeya Dwivedi <memxor@gmail.com> writes:
> > >>
> > >> > On Thu, 7 Sept 2023 at 12:26, Toke Høiland-Jørgensen <toke@kernel.org> wrote:
> > >> >>
> > >> >> +Arnaldo
> > >> >>
> > >> >> > Hi,
> > >> >> >
> > >> >> > Our bpf fuzzer, a customized Syzkaller, triggered a lockdep warning in
> > >> >> > the bpf queue map in v5.15. Since queue_stack_maps.c has no major changes
> > >> >> > since v5.15, we think this should still exist in the latest kernel.
> > >> >> > The bug can be occasionally triggered, and we suspect one of the
> > >> >> > eBPF programs involved to be the following one. We also attached the lockdep
> > >> >> > warning at the end.
> > >> >> >
> > >> >> > #define DEFINE_BPF_MAP_NO_KEY(the_map, TypeOfMap, MapFlags,
> > >> >> > TypeOfValue, MaxEntries) \
> > >> >> >         struct {                                                        \
> > >> >> >             __uint(type, TypeOfMap);                                    \
> > >> >> >             __uint(map_flags, (MapFlags));                              \
> > >> >> >             __uint(max_entries, (MaxEntries));                          \
> > >> >> >             __type(value, TypeOfValue);                                 \
> > >> >> >         } the_map SEC(".maps");
> > >> >> >
> > >> >> > DEFINE_BPF_MAP_NO_KEY(map_0, BPF_MAP_TYPE_QUEUE, 0 | BPF_F_WRONLY,
> > >> >> > struct_0, 162);
> > >> >> > SEC("perf_event")
> > >> >> > int func(struct bpf_perf_event_data *ctx) {
> > >> >> >         char v0[96] = {};
> > >> >> >         uint64_t v1 = 0;
> > >> >> >         v1 = bpf_map_pop_elem(&map_0, v0);
> > >> >> >         return 163819661;
> > >> >> > }
> > >> >> >
> > >> >> >
> > >> >> > The program is attached to the following perf event.
> > >> >> >
> > >> >> > struct perf_event_attr attr_type_hw = {
> > >> >> >         .type = PERF_TYPE_HARDWARE,
> > >> >> >         .config = PERF_COUNT_HW_CPU_CYCLES,
> > >> >> >         .sample_freq = 50,
> > >> >> >         .inherit = 1,
> > >> >> >         .freq = 1,
> > >> >> > };
> > >> >> >
> > >> >> > ================================WARNING: inconsistent lock state
> > >> >> > 5.15.26+ #2 Not tainted
> > >> >> > --------------------------------
> > >> >> > inconsistent {INITIAL USE} -> {IN-NMI} usage.
> > >> >> > syz-executor.5/19749 [HC1[1]:SC0[0]:HE0:SE1] takes:
> > >> >> > ffff88804c9fc198 (&qs->lock){..-.}-{2:2}, at: __queue_map_get+0x31/0x250
> > >> >> > {INITIAL USE} state was registered at:
> > >> >> >   lock_acquire+0x1a3/0x4b0
> > >> >> >   _raw_spin_lock_irqsave+0x48/0x60
> > >> >> >   __queue_map_get+0x31/0x250
> > >> >> >   bpf_prog_577904e86c81dead_func+0x12/0x4b4
> > >> >> >   trace_call_bpf+0x262/0x5d0
> > >> >> >   perf_trace_run_bpf_submit+0x91/0x1c0
> > >> >> >   perf_trace_sched_switch+0x46c/0x700
> > >> >> >   __schedule+0x11b5/0x24a0
> > >> >> >   schedule+0xd4/0x270
> > >> >> >   futex_wait_queue_me+0x25f/0x520
> > >> >> >   futex_wait+0x1e0/0x5f0
> > >> >> >   do_futex+0x395/0x1890
> > >> >> >   __x64_sys_futex+0x1cb/0x480
> > >> >> >   do_syscall_64+0x3b/0xc0
> > >> >> >   entry_SYSCALL_64_after_hwframe+0x44/0xae
> > >> >> > irq event stamp: 13640
> > >> >> > hardirqs last  enabled at (13639): [<ffffffff95eb2bf4>]
> > >> >> > _raw_spin_unlock_irq+0x24/0x40
> > >> >> > hardirqs last disabled at (13640): [<ffffffff95eb2d4d>]
> > >> >> > _raw_spin_lock_irqsave+0x5d/0x60
> > >> >> > softirqs last  enabled at (13464): [<ffffffff93e26de5>] __sys_bpf+0x3e15/0x4e80
> > >> >> > softirqs last disabled at (13462): [<ffffffff93e26da3>] __sys_bpf+0x3dd3/0x4e80
> > >> >> >
> > >> >> > other info that might help us debug this:
> > >> >> >  Possible unsafe locking scenario:
> > >> >> >
> > >> >> >        CPU0
> > >> >> >        ----
> > >> >> >   lock(&qs->lock);
> > >> >> >   <Interrupt>
> > >> >> >     lock(&qs->lock);
> > >> >>
> > >> >> Hmm, so that lock() uses raw_spin_lock_irqsave(), which *should* be
> > >> >> disabling interrupts entirely for the critical section. But I guess a
> > >> >> Perf hardware event can still trigger? Which seems like it would
> > >> >> potentially wreak havoc with lots of things, not just this queue map
> > >> >> function?
> > >> >>
> > >> >> No idea how to protect against this, though. Hoping Arnaldo knows? :)
> > >> >>
> > >> >
> > >> > The locking should probably be protected by a percpu integer counter,
> > >> > incremented and decremented before and after the lock is taken,
> > >> > respectively. If it is already non-zero, then -EBUSY should be
> > >> > returned. It is similar to what htab_lock_bucket protects against in
> > >> > hashtab.c.
> > >>
> > >> Ah, neat! Okay, seems straight-forward enough to replicate. Hsin, could
> > >> you please check if the patch below gets rid of the splat?
> > >
> > > Instead of adding all this complexity for the map that is so rarely used
> > > it's easier to disallow it perf_event prog types.
> > > Or add a single if (in_nmi()) return -EBUSY.
> >
> > Heh, I was actually thinking the "nmi-safe lock" thing might be
> > something that could be neatly encapsulated into the lock library
> > helpers. But OK, so you mean just something like the below, then?
>
> Yep.
> In bpf timers we do:
>         if (in_nmi())
>                 return -EOPNOTSUPP;
> while in ringbuf we have:
>         if (in_nmi()) {
>                 if (!spin_trylock_irqsave(&rb->spinlock, flags))
>                         return NULL;
>         } else {
>                 spin_lock_irqsave(&rb->spinlock, flags);
>         }
>
> I think both options are fine to use with queue/stack map.
> trylock approach is probably less drastic.
>

I tried the trylock approach as shown below. With it applied, the fuzzer
no longer triggers the lockdep warning.

-Hsin-Wei

diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index f9c734aaa990..ef95b796a0fa 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -103,7 +103,12 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
        int err = 0;
        void *ptr;

-       raw_spin_lock_irqsave(&qs->lock, flags);
+       if (in_nmi()) {
+               if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+                       return -EBUSY;
+       } else {
+               raw_spin_lock_irqsave(&qs->lock, flags);
+       }

        if (queue_stack_map_is_empty(qs)) {
                memset(value, 0, qs->map.value_size);
@@ -133,7 +138,12 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
        void *ptr;
        u32 index;

-       raw_spin_lock_irqsave(&qs->lock, flags);
+       if (in_nmi()) {
+               if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+                       return -EBUSY;
+       } else {
+               raw_spin_lock_irqsave(&qs->lock, flags);
+       }

        if (queue_stack_map_is_empty(qs)) {
                memset(value, 0, qs->map.value_size);
@@ -198,7 +208,12 @@ static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
        if (flags & BPF_NOEXIST || flags > BPF_EXIST)
                return -EINVAL;

-       raw_spin_lock_irqsave(&qs->lock, irq_flags);
+       if (in_nmi()) {
+               if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
+                       return -EBUSY;
+       } else {
+               raw_spin_lock_irqsave(&qs->lock, irq_flags);
+       }

        if (queue_stack_map_is_full(qs)) {
                if (!replace) {




> > I'll send a proper patch for that later if no one objects (or beats me
> > to it) :)
> >
> > -Toke
> >
> > diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
> > index 8d2ddcb7566b..138525424745 100644
> > --- a/kernel/bpf/queue_stack_maps.c
> > +++ b/kernel/bpf/queue_stack_maps.c
> > @@ -98,6 +98,9 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
> >         int err = 0;
> >         void *ptr;
> >
> > +       if (in_nmi())
> > +               return -EBUSY;
> > +
> >         raw_spin_lock_irqsave(&qs->lock, flags);
> >
> >         if (queue_stack_map_is_empty(qs)) {
> > @@ -128,6 +131,9 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
> >         void *ptr;
> >         u32 index;
> >
> > +       if (in_nmi())
> > +               return -EBUSY;
> > +
> >         raw_spin_lock_irqsave(&qs->lock, flags);
> >
> >         if (queue_stack_map_is_empty(qs)) {
> > @@ -193,6 +199,9 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
> >         if (flags & BPF_NOEXIST || flags > BPF_EXIST)
> >                 return -EINVAL;
> >
> > +       if (in_nmi())
> > +               return -EBUSY;
> > +
> >         raw_spin_lock_irqsave(&qs->lock, irq_flags);
> >
> >         if (queue_stack_map_is_full(qs)) {


Thread overview: 9+ messages
2023-09-07  5:02 Possible deadlock in bpf queue map Hsin-Wei Hung
2023-09-07 10:26 ` Toke Høiland-Jørgensen
2023-09-07 11:13   ` Kumar Kartikeya Dwivedi
2023-09-07 13:04     ` Toke Høiland-Jørgensen
2023-09-07 15:40       ` Alexei Starovoitov
2023-09-07 16:05         ` Toke Høiland-Jørgensen
2023-09-07 16:08           ` Alexei Starovoitov
2023-09-08  5:29             ` Hsin-Wei Hung
2023-09-08  0:56       ` Hou Tao
