BPF List
* [PATCH v3 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map
@ 2024-05-14 12:40 Siddharth Chintamaneni
  2024-05-14 12:40 ` [PATCH v3 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps Siddharth Chintamaneni
  2024-05-15 17:02 ` [PATCH v3 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map Kumar Kartikeya Dwivedi
  0 siblings, 2 replies; 12+ messages in thread
From: Siddharth Chintamaneni @ 2024-05-14 12:40 UTC
  To: bpf
  Cc: alexei.starovoitov, daniel, olsajiri, andrii, yonghong.song,
	rjsu26, sairoop, miloc, memxor, Siddharth Chintamaneni

Added selftests to check for nested deadlocks in queue and stack maps.

test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__open 0 nsec
test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__load 0 nsec
test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__attach 0 nsec
test_map_queue_stack_nesting_success:PASS:MAP Write 0 nsec
test_map_queue_stack_nesting_success:PASS:no map nesting 0 nsec
test_map_queue_stack_nesting_success:PASS:no map nesting 0 nsec
384/1   test_queue_stack_nested_map/map_queue_nesting:OK
test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__open 0 nsec
test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__load 0 nsec
test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__attach 0 nsec
test_map_queue_stack_nesting_success:PASS:MAP Write 0 nsec
test_map_queue_stack_nesting_success:PASS:no map nesting 0 nsec
384/2   test_queue_stack_nested_map/map_stack_nesting:OK
384     test_queue_stack_nested_map:OK
Summary: 1/2 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
---
 .../prog_tests/test_queue_stack_nested_map.c  |  69 +++++++++++
 .../bpf/progs/test_queue_stack_nested_map.c   | 116 ++++++++++++++++++
 2 files changed, 185 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/test_queue_stack_nested_map.c
 create mode 100644 tools/testing/selftests/bpf/progs/test_queue_stack_nested_map.c

diff --git a/tools/testing/selftests/bpf/prog_tests/test_queue_stack_nested_map.c b/tools/testing/selftests/bpf/prog_tests/test_queue_stack_nested_map.c
new file mode 100644
index 000000000000..fc46561788af
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_queue_stack_nested_map.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "test_queue_stack_nested_map.skel.h"
+
+
+static void test_map_queue_stack_nesting_success(bool is_map_queue)
+{
+	struct test_queue_stack_nested_map *skel;
+	int err;
+
+	skel = test_queue_stack_nested_map__open();
+	if (!ASSERT_OK_PTR(skel, "test_queue_stack_nested_map__open"))
+		return;
+
+	err = test_queue_stack_nested_map__load(skel);
+	if (!ASSERT_OK(err, "test_queue_stack_nested_map__load"))
+		goto out;
+
+	skel->bss->pid = getpid();
+	err = test_queue_stack_nested_map__attach(skel);
+	if (!ASSERT_OK(err, "test_queue_stack_nested_map__attach"))
+		goto out;
+
+	/* trigger map from userspace to check nesting */
+	int value = 0;
+
+	do {
+		if (is_map_queue) {
+			err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_queue),
+								NULL, &value, 0);
+			if (err < 0)
+				break;
+			err = bpf_map_lookup_and_delete_elem(bpf_map__fd(skel->maps.map_queue),
+								 NULL, &value);
+		} else {
+			err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_stack),
+								NULL, &value, 0);
+			if (err < 0)
+				break;
+			err = bpf_map_lookup_and_delete_elem(bpf_map__fd(skel->maps.map_stack),
+								NULL, &value);
+		}
+	} while (0);
+
+
+	if (!ASSERT_OK(err, "MAP Write"))
+		goto out;
+
+	if (is_map_queue) {
+		ASSERT_EQ(skel->bss->err_queue_push, -EBUSY, "no map nesting");
+		ASSERT_EQ(skel->bss->err_queue_pop, -EBUSY, "no map nesting");
+	} else {
+		ASSERT_EQ(skel->bss->err_stack, -EBUSY, "no map nesting");
+	}
+out:
+	test_queue_stack_nested_map__destroy(skel);
+}
+
+void test_test_queue_stack_nested_map(void)
+{
+	if (test__start_subtest("map_queue_nesting"))
+		test_map_queue_stack_nesting_success(true);
+	if (test__start_subtest("map_stack_nesting"))
+		test_map_queue_stack_nesting_success(false);
+
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_queue_stack_nested_map.c b/tools/testing/selftests/bpf/progs/test_queue_stack_nested_map.c
new file mode 100644
index 000000000000..893a37593206
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_queue_stack_nested_map.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct {
+	__uint(type, BPF_MAP_TYPE_STACK);
+	__uint(max_entries, 32);
+	__uint(key_size, 0);
+	__uint(value_size, sizeof(__u32));
+} map_stack SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_QUEUE);
+	__uint(max_entries, 32);
+	__uint(key_size, 0);
+	__uint(value_size, sizeof(__u32));
+} map_queue SEC(".maps");
+
+
+int err_queue_push;
+int err_queue_pop;
+int err_stack;
+int pid;
+__u32 trigger_flag_queue_push;
+__u32 trigger_flag_queue_pop;
+__u32 trigger_flag_stack;
+
+SEC("fentry/queue_stack_map_push_elem")
+int BPF_PROG(test_queue_stack_push_trigger, raw_spinlock_t *lock, unsigned long flags)
+{
+
+	if ((bpf_get_current_pid_tgid() >> 32) != pid)
+		return 0;
+
+
+	trigger_flag_queue_push = 1;
+
+	return 0;
+}
+
+SEC("fentry/queue_map_pop_elem")
+int BPF_PROG(test_queue_pop_trigger, raw_spinlock_t *lock, unsigned long flags)
+{
+
+	if ((bpf_get_current_pid_tgid() >> 32) != pid)
+		return 0;
+
+	trigger_flag_queue_pop = 1;
+
+	return 0;
+}
+
+
+SEC("fentry/stack_map_pop_elem")
+int BPF_PROG(test_stack_pop_trigger, raw_spinlock_t *lock, unsigned long flags)
+{
+
+	if ((bpf_get_current_pid_tgid() >> 32) != pid)
+		return 0;
+
+	trigger_flag_stack = 1;
+
+	return 0;
+}
+
+SEC("fentry/_raw_spin_unlock_irqrestore")
+int BPF_PROG(test_queue_pop_nesting, raw_spinlock_t *lock, unsigned long flags)
+{
+	__u32 val;
+
+	if ((bpf_get_current_pid_tgid() >> 32) != pid || trigger_flag_queue_pop != 1)
+		return 0;
+
+
+	err_queue_pop = bpf_map_pop_elem(&map_queue, &val);
+
+	trigger_flag_queue_pop = 0;
+
+	return 0;
+}
+
+SEC("fentry/_raw_spin_unlock_irqrestore")
+int BPF_PROG(test_stack_nesting, raw_spinlock_t *lock, unsigned long flags)
+{
+	__u32 val;
+
+	if ((bpf_get_current_pid_tgid() >> 32) != pid || trigger_flag_stack != 1)
+		return 0;
+
+
+	err_stack = bpf_map_pop_elem(&map_stack, &val);
+
+	trigger_flag_stack = 0;
+
+	return 0;
+}
+
+
+SEC("fentry/_raw_spin_unlock_irqrestore")
+int BPF_PROG(test_queue_push_nesting, raw_spinlock_t *lock, unsigned long flags)
+{
+	__u32 val = 1;
+
+	if ((bpf_get_current_pid_tgid() >> 32) != pid || trigger_flag_queue_push != 1) {
+		return 0;
+	}
+
+	err_queue_push = bpf_map_push_elem(&map_queue, &val, 0);
+
+	trigger_flag_queue_push = 0;
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
-- 
2.44.0


* [PATCH v3 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps
  2024-05-14 12:40 [PATCH v3 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map Siddharth Chintamaneni
@ 2024-05-14 12:40 ` Siddharth Chintamaneni
  2024-05-15 17:32   ` Kumar Kartikeya Dwivedi
                     ` (2 more replies)
  2024-05-15 17:02 ` [PATCH v3 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map Kumar Kartikeya Dwivedi
  1 sibling, 3 replies; 12+ messages in thread
From: Siddharth Chintamaneni @ 2024-05-14 12:40 UTC
  To: bpf
  Cc: alexei.starovoitov, daniel, olsajiri, andrii, yonghong.song,
	rjsu26, sairoop, miloc, memxor, Siddharth Chintamaneni,
	syzbot+8bdfc2c53fb2b63e1871

This patch is a revised version that addresses a possible deadlock in
the queue and stack map types.

A deadlock can occur when a nested BPF program tries to acquire the
same lock as the parent BPF program in order to perform a write
operation on the same map. This bug was also reported by syzbot.
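
For illustration, a minimal sketch of the problematic nesting (not part
of this patch; program and map names are made up, the usual
vmlinux.h/bpf_helpers.h includes are omitted, and patch 2/2 uses the
same trick in the selftests):

	struct {
		__uint(type, BPF_MAP_TYPE_QUEUE);
		__uint(max_entries, 32);
		__uint(key_size, 0);
		__uint(value_size, sizeof(__u32));
	} q SEC(".maps");

	/* The fentry fires on entry to the unlock, i.e. while
	 * queue_stack_map_push_elem() still holds qs->lock, and then
	 * re-enters the same map: without this patch the inner push
	 * spins forever on the qs->lock already held on this CPU.
	 */
	SEC("fentry/_raw_spin_unlock_irqrestore")
	int BPF_PROG(nested_push, raw_spinlock_t *lock, unsigned long flags)
	{
		__u32 val = 0;

		bpf_map_push_elem(&q, &val, 0);
		return 0;
	}

(The real selftest additionally filters by pid and a trigger flag,
since this fentry fires for every such unlock in the system.)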

Link: https://lore.kernel.org/lkml/0000000000004c3fc90615f37756@google.com/
Reported-by: syzbot+8bdfc2c53fb2b63e1871@syzkaller.appspotmail.com
Fixes: f1a2e44a3aec ("bpf: add queue and stack maps")
Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
---
 kernel/bpf/queue_stack_maps.c | 76 +++++++++++++++++++++++++++++++++--
 1 file changed, 73 insertions(+), 3 deletions(-)

diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index d869f51ea93a..b5ed76c9ddd7 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -13,11 +13,13 @@
 #define QUEUE_STACK_CREATE_FLAG_MASK \
 	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
 
+
 struct bpf_queue_stack {
 	struct bpf_map map;
 	raw_spinlock_t lock;
 	u32 head, tail;
 	u32 size; /* max_entries + 1 */
+	int __percpu *map_locked;
 
 	char elements[] __aligned(8);
 };
@@ -78,6 +80,15 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 
 	qs->size = size;
 
+	qs->map_locked = bpf_map_alloc_percpu(&qs->map,
+						sizeof(int),
+						sizeof(int),
+						GFP_USER | __GFP_NOWARN);
+	if (!qs->map_locked) {
+		bpf_map_area_free(qs);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	raw_spin_lock_init(&qs->lock);
 
 	return &qs->map;
@@ -88,19 +99,57 @@ static void queue_stack_map_free(struct bpf_map *map)
 {
 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
 
+	free_percpu(qs->map_locked);
 	bpf_map_area_free(qs);
 }
 
+static inline int map_lock_inc(struct bpf_queue_stack *qs)
+{
+	unsigned long flags;
+
+	preempt_disable();
+	local_irq_save(flags);
+	if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
+		__this_cpu_dec(*(qs->map_locked));
+		local_irq_restore(flags);
+		preempt_enable();
+		return -EBUSY;
+	}
+
+	local_irq_restore(flags);
+	preempt_enable();
+
+	return 0;
+}
+
+static inline void map_unlock_dec(struct bpf_queue_stack *qs)
+{
+	unsigned long flags;
+
+	preempt_disable();
+	local_irq_save(flags);
+	__this_cpu_dec(*(qs->map_locked));
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
 static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
 {
 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
 	unsigned long flags;
 	int err = 0;
 	void *ptr;
+	int ret;
+
+	ret = map_lock_inc(qs);
+	if (ret)
+		return ret;
 
 	if (in_nmi()) {
-		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+		if (!raw_spin_trylock_irqsave(&qs->lock, flags)) {
+			map_unlock_dec(qs);
 			return -EBUSY;
+		}
 	} else {
 		raw_spin_lock_irqsave(&qs->lock, flags);
 	}
@@ -121,6 +170,8 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
 
 out:
 	raw_spin_unlock_irqrestore(&qs->lock, flags);
+	map_unlock_dec(qs);
+
 	return err;
 }
 
@@ -132,10 +183,17 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
 	int err = 0;
 	void *ptr;
 	u32 index;
+	int ret;
+
+	ret = map_lock_inc(qs);
+	if (ret)
+		return ret;
 
 	if (in_nmi()) {
-		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+		if (!raw_spin_trylock_irqsave(&qs->lock, flags)) {
+			map_unlock_dec(qs);
 			return -EBUSY;
+		}
 	} else {
 		raw_spin_lock_irqsave(&qs->lock, flags);
 	}
@@ -158,6 +216,8 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
 
 out:
 	raw_spin_unlock_irqrestore(&qs->lock, flags);
+	map_unlock_dec(qs);
+
 	return err;
 }
 
@@ -193,6 +253,7 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
 	unsigned long irq_flags;
 	int err = 0;
 	void *dst;
+	int ret;
 
 	/* BPF_EXIST is used to force making room for a new element in case the
 	 * map is full
@@ -203,9 +264,16 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
 	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
 		return -EINVAL;
 
+
+	ret = map_lock_inc(qs);
+	if (ret)
+		return ret;
+
 	if (in_nmi()) {
-		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
+		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags)) {
+			map_unlock_dec(qs);
 			return -EBUSY;
+		}
 	} else {
 		raw_spin_lock_irqsave(&qs->lock, irq_flags);
 	}
@@ -228,6 +296,8 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
 
 out:
 	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
+	map_unlock_dec(qs);
+
 	return err;
 }
 
-- 
2.44.0


* Re: [PATCH v3 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map
  2024-05-14 12:40 [PATCH v3 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map Siddharth Chintamaneni
  2024-05-14 12:40 ` [PATCH v3 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps Siddharth Chintamaneni
@ 2024-05-15 17:02 ` Kumar Kartikeya Dwivedi
  2024-05-15 17:44   ` Siddharth Chintamaneni
  1 sibling, 1 reply; 12+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2024-05-15 17:02 UTC
  To: Siddharth Chintamaneni
  Cc: bpf, alexei.starovoitov, daniel, olsajiri, andrii, yonghong.song,
	rjsu26, sairoop, miloc

On Tue, 14 May 2024 at 14:41, Siddharth Chintamaneni
<sidchintamaneni@gmail.com> wrote:
>
> Added selftests to check for nested deadlocks in queue and stack maps.
>
> test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__open 0 nsec
> test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__load 0 nsec
> test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__attach 0 nsec
> test_map_queue_stack_nesting_success:PASS:MAP Write 0 nsec
> test_map_queue_stack_nesting_success:PASS:no map nesting 0 nsec
> test_map_queue_stack_nesting_success:PASS:no map nesting 0 nsec
> 384/1   test_queue_stack_nested_map/map_queue_nesting:OK
> test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__open 0 nsec
> test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__load 0 nsec
> test_map_queue_stack_nesting_success:PASS:test_queue_stack_nested_map__attach 0 nsec
> test_map_queue_stack_nesting_success:PASS:MAP Write 0 nsec
> test_map_queue_stack_nesting_success:PASS:no map nesting 0 nsec
> 384/2   test_queue_stack_nested_map/map_stack_nesting:OK
> 384     test_queue_stack_nested_map:OK
> Summary: 1/2 PASSED, 0 SKIPPED, 0 FAILED
>
> Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
> ---

CI fails on s390
https://github.com/kernel-patches/bpf/actions/runs/9081519831/job/24957489598?pr=7031
A different method of triggering the deadlock is required. It seems we
cannot rely on _raw_spin_lock_irqsave being available everywhere.

* Re: [PATCH v3 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps
  2024-05-14 12:40 ` [PATCH v3 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps Siddharth Chintamaneni
@ 2024-05-15 17:32   ` Kumar Kartikeya Dwivedi
  2024-05-16 14:04   ` Barret Rhoden
  2024-05-17  1:53   ` Hou Tao
  2 siblings, 0 replies; 12+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2024-05-15 17:32 UTC
  To: Siddharth Chintamaneni
  Cc: bpf, alexei.starovoitov, daniel, olsajiri, andrii, yonghong.song,
	rjsu26, sairoop, miloc, syzbot+8bdfc2c53fb2b63e1871

On Tue, 14 May 2024 at 14:41, Siddharth Chintamaneni
<sidchintamaneni@gmail.com> wrote:
>
> This patch is a revised version that addresses a possible deadlock in
> the queue and stack map types.
>
> A deadlock can occur when a nested BPF program tries to acquire the
> same lock as the parent BPF program in order to perform a write
> operation on the same map. This bug was also reported by syzbot.
>
> Link: https://lore.kernel.org/lkml/0000000000004c3fc90615f37756@google.com/
> Reported-by: syzbot+8bdfc2c53fb2b63e1871@syzkaller.appspotmail.com
> Fixes: f1a2e44a3aec ("bpf: add queue and stack maps")
> Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
> ---

Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>

There are a couple of extra newlines, it's minor but can also fix them
if you respin.

* Re: [PATCH v3 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map
  2024-05-15 17:02 ` [PATCH v3 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map Kumar Kartikeya Dwivedi
@ 2024-05-15 17:44   ` Siddharth Chintamaneni
  2024-05-15 17:56     ` Kumar Kartikeya Dwivedi
  0 siblings, 1 reply; 12+ messages in thread
From: Siddharth Chintamaneni @ 2024-05-15 17:44 UTC
  To: Kumar Kartikeya Dwivedi
  Cc: bpf, alexei.starovoitov, daniel, olsajiri, andrii, yonghong.song,
	rjsu26, sairoop, miloc

> CI fails on s390
> https://github.com/kernel-patches/bpf/actions/runs/9081519831/job/24957489598?pr=7031
> A different method of triggering the deadlock is required. It seems we
> cannot rely on _raw_spin_lock_irqsave being available everywhere.

The other functions in the critical section are getting inlined, so I
used _raw_spin_lock_irqsave to write the selftests.

Another approach could be to simply pass the tests when the function is
inlined, just like in
https://elixir.bootlin.com/linux/latest/source/tools/testing/selftests/bpf/prog_tests/htab_update.c
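
Roughly, following that test's pattern (untested sketch):

	err = test_queue_stack_nested_map__load(skel);
	/* the fentry target may be inlined, in which case
	 * find_kernel_btf_id() returns -ESRCH at load time
	 */
	if (!ASSERT_TRUE(!err || err == -ESRCH, "skel_load") || err)
		goto out;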

* Re: [PATCH v3 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map
  2024-05-15 17:44   ` Siddharth Chintamaneni
@ 2024-05-15 17:56     ` Kumar Kartikeya Dwivedi
  2024-05-15 17:58       ` Kumar Kartikeya Dwivedi
  0 siblings, 1 reply; 12+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2024-05-15 17:56 UTC
  To: Siddharth Chintamaneni
  Cc: bpf, alexei.starovoitov, daniel, olsajiri, andrii, yonghong.song,
	rjsu26, sairoop, miloc

On Wed, 15 May 2024 at 19:44, Siddharth Chintamaneni
<sidchintamaneni@gmail.com> wrote:
>
> > CI fails on s390
> > https://github.com/kernel-patches/bpf/actions/runs/9081519831/job/24957489598?pr=7031
> > A different method of triggering the deadlock is required. It seems we
> > cannot rely on _raw_spin_lock_irqsave being available everywhere.
>
> The other functions in the critical section are getting inlined, so I
> used _raw_spin_lock_irqsave to write the selftests.
>
> Another approach could be to simply pass the tests when the function is
> inlined, just like in
> https://elixir.bootlin.com/linux/latest/source/tools/testing/selftests/bpf/prog_tests/htab_update.c

Yeah, it is certainly tricky.
Skipping seems fragile because what if x86 and others also inline the
function? Then this test would simply report success while not
testing anything.

One option is to place it at trace_contention_begin, and spawn
multiple threads in the test and try until you hit -EBUSY (due to
increased contention, leading to queued_spin_lock_slowpath being
called and the tracepoint being hit).
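
For example, roughly (untested, names made up):

	static void *map_hammer(void *arg)
	{
		int fd = *(int *)arg;
		__u32 val = 0;

		for (int i = 0; i < 100000; i++) {
			bpf_map_update_elem(fd, NULL, &val, 0);
			bpf_map_lookup_and_delete_elem(fd, NULL, &val);
		}
		return NULL;
	}

pthread_create() a handful of these against the same map fd, then check
the skeleton's bss for the nested op's -EBUSY as the test already does.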

The other option would be to add a dummy empty call within the
critical section marked as noinline, and then attach the BPF program
there. But I think this might not be liked by everyone since we're
introducing code in the kernel just to test stuff.

So option 1 seems better to me, but the test needs to be set up
carefully to ensure contention occurs.
Others can chime in with better ideas.

* Re: [PATCH v3 bpf-next 2/2] selftests/bpf: Added selftests to check deadlocks in queue and stack map
  2024-05-15 17:56     ` Kumar Kartikeya Dwivedi
@ 2024-05-15 17:58       ` Kumar Kartikeya Dwivedi
  0 siblings, 0 replies; 12+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2024-05-15 17:58 UTC
  To: Siddharth Chintamaneni
  Cc: bpf, alexei.starovoitov, daniel, olsajiri, andrii, yonghong.song,
	rjsu26, sairoop, miloc

On Wed, 15 May 2024 at 19:56, Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>
> On Wed, 15 May 2024 at 19:44, Siddharth Chintamaneni
> <sidchintamaneni@gmail.com> wrote:
> >
> > > CI fails on s390
> > > https://github.com/kernel-patches/bpf/actions/runs/9081519831/job/24957489598?pr=7031
> > > A different method of triggering the deadlock is required. It seems we
> > > cannot rely on _raw_spin_lock_irqsave being available everywhere.
> >
> > The other functions in the critical section are getting inlined, so I
> > used _raw_spin_lock_irqsave to write the selftests.
> >
> > Another approach could be to simply pass the tests when the function is
> > inlined, just like in
> > https://elixir.bootlin.com/linux/latest/source/tools/testing/selftests/bpf/prog_tests/htab_update.c
>
> Yeah, it is certainly tricky.
> Skipping seems fragile because what if x86 and others also inline the
> function? Then this test would simply report success while not
> testing anything.
>
> One option is to place it at trace_contention_begin, and spawn

Sorry, this should be trace_contention_end, the lock is only held at that point.
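
i.e. something like this (untested sketch, reusing the pid and trigger
flag globals from the current selftest):

	SEC("tp_btf/contention_end")
	int BPF_PROG(test_queue_push_nesting, void *lock, int ret)
	{
		__u32 val = 1;

		if ((bpf_get_current_pid_tgid() >> 32) != pid ||
		    trigger_flag_queue_push != 1)
			return 0;

		/* the parent op holds qs->lock once contention ends, so
		 * this push must hit the re-entrancy check and record
		 * -EBUSY
		 */
		err_queue_push = bpf_map_push_elem(&map_queue, &val, 0);
		trigger_flag_queue_push = 0;

		return 0;
	}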

* Re: [PATCH v3 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps
  2024-05-14 12:40 ` [PATCH v3 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps Siddharth Chintamaneni
  2024-05-15 17:32   ` Kumar Kartikeya Dwivedi
@ 2024-05-16 14:04   ` Barret Rhoden
  2024-05-16 14:34     ` Kumar Kartikeya Dwivedi
  2024-05-17  1:53   ` Hou Tao
  2 siblings, 1 reply; 12+ messages in thread
From: Barret Rhoden @ 2024-05-16 14:04 UTC
  To: Siddharth Chintamaneni
  Cc: bpf, alexei.starovoitov, daniel, olsajiri, andrii, yonghong.song,
	rjsu26, sairoop, miloc, memxor, syzbot+8bdfc2c53fb2b63e1871

On 5/14/24 08:40, Siddharth Chintamaneni wrote:
[...]
> +static inline int map_lock_inc(struct bpf_queue_stack *qs)
> +{
> +	unsigned long flags;
> +
> +	preempt_disable();
> +	local_irq_save(flags);
> +	if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
> +		__this_cpu_dec(*(qs->map_locked));
> +		local_irq_restore(flags);
> +		preempt_enable();
> +		return -EBUSY;
> +	}
> +
> +	local_irq_restore(flags);
> +	preempt_enable();

it looks like you're taking the approach from kernel/bpf/hashtab.c to 
use a per-cpu lock before grabbing the real lock.  but in the success 
case here (where you incremented the percpu counter), you're enabling 
irqs and preemption.

what happens if you get preempted right after this?  you've left the 
per-cpu bit set, but then you run on another cpu.

possible alternative: instead of splitting the overall lock into "grab 
percpu lock, then grab real lock", have a single function for both, 
similar to htab_lock_bucket().  and keep irqs and preemption off from 
the moment you start attempting the overall lock until you completely 
unlock.
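
e.g. roughly (untested, mirroring htab_lock_bucket()):

	static inline int queue_stack_map_lock(struct bpf_queue_stack *qs,
					       unsigned long *pflags)
	{
		unsigned long flags;

		preempt_disable();
		local_irq_save(flags);
		if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
			__this_cpu_dec(*(qs->map_locked));
			local_irq_restore(flags);
			preempt_enable();
			return -EBUSY;
		}

		/* irqs and preemption stay off until the paired unlock */
		raw_spin_lock(&qs->lock);
		*pflags = flags;
		return 0;
	}

	static inline void queue_stack_map_unlock(struct bpf_queue_stack *qs,
						  unsigned long flags)
	{
		raw_spin_unlock(&qs->lock);
		__this_cpu_dec(*(qs->map_locked));
		local_irq_restore(flags);
		preempt_enable();
	}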

barret


> +
> +	return 0;
> +}
> +
> +static inline void map_unlock_dec(struct bpf_queue_stack *qs)
> +{
> +	unsigned long flags;
> +
> +	preempt_disable();
> +	local_irq_save(flags);
> +	__this_cpu_dec(*(qs->map_locked));
> +	local_irq_restore(flags);
> +	preempt_enable();
> +}
> +
>   static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
>   {
>   	struct bpf_queue_stack *qs = bpf_queue_stack(map);
>   	unsigned long flags;
>   	int err = 0;
>   	void *ptr;
> +	int ret;
> +
> +	ret = map_lock_inc(qs);
> +	if (ret)
> +		return ret;
>   
>   	if (in_nmi()) {
> -		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
> +		if (!raw_spin_trylock_irqsave(&qs->lock, flags)) {
> +			map_unlock_dec(qs);
>   			return -EBUSY;
> +		}
>   	} else {
>   		raw_spin_lock_irqsave(&qs->lock, flags);
>   	}
> @@ -121,6 +170,8 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
>   
>   out:
>   	raw_spin_unlock_irqrestore(&qs->lock, flags);
> +	map_unlock_dec(qs);
> +
>   	return err;
>   }
>   
> @@ -132,10 +183,17 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
>   	int err = 0;
>   	void *ptr;
>   	u32 index;
> +	int ret;
> +
> +	ret = map_lock_inc(qs);
> +	if (ret)
> +		return ret;
>   
>   	if (in_nmi()) {
> -		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
> +		if (!raw_spin_trylock_irqsave(&qs->lock, flags)) {
> +			map_unlock_dec(qs);
>   			return -EBUSY;
> +		}
>   	} else {
>   		raw_spin_lock_irqsave(&qs->lock, flags);
>   	}
> @@ -158,6 +216,8 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
>   
>   out:
>   	raw_spin_unlock_irqrestore(&qs->lock, flags);
> +	map_unlock_dec(qs);
> +
>   	return err;
>   }
>   
> @@ -193,6 +253,7 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
>   	unsigned long irq_flags;
>   	int err = 0;
>   	void *dst;
> +	int ret;
>   
>   	/* BPF_EXIST is used to force making room for a new element in case the
>   	 * map is full
> @@ -203,9 +264,16 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
>   	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
>   		return -EINVAL;
>   
> +
> +	ret = map_lock_inc(qs);
> +	if (ret)
> +		return ret;
> +
>   	if (in_nmi()) {
> -		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
> +		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags)) {
> +			map_unlock_dec(qs);
>   			return -EBUSY;
> +		}
>   	} else {
>   		raw_spin_lock_irqsave(&qs->lock, irq_flags);
>   	}
> @@ -228,6 +296,8 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
>   
>   out:
>   	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
> +	map_unlock_dec(qs);
> +
>   	return err;
>   }
>   


* Re: [PATCH v3 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps
  2024-05-16 14:04   ` Barret Rhoden
@ 2024-05-16 14:34     ` Kumar Kartikeya Dwivedi
  2024-05-16 15:31       ` Siddharth Chintamaneni
  0 siblings, 1 reply; 12+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2024-05-16 14:34 UTC
  To: Barret Rhoden
  Cc: Siddharth Chintamaneni, bpf, alexei.starovoitov, daniel, olsajiri,
	andrii, yonghong.song, rjsu26, sairoop, miloc,
	syzbot+8bdfc2c53fb2b63e1871

On Thu, 16 May 2024 at 16:05, Barret Rhoden <brho@google.com> wrote:
>
> On 5/14/24 08:40, Siddharth Chintamaneni wrote:
> [...]
> > +static inline int map_lock_inc(struct bpf_queue_stack *qs)
> > +{
> > +     unsigned long flags;
> > +
> > +     preempt_disable();
> > +     local_irq_save(flags);
> > +     if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
> > +             __this_cpu_dec(*(qs->map_locked));
> > +             local_irq_restore(flags);
> > +             preempt_enable();
> > +             return -EBUSY;
> > +     }
> > +
> > +     local_irq_restore(flags);
> > +     preempt_enable();
>
> it looks like you're taking the approach from kernel/bpf/hashtab.c to
> use a per-cpu lock before grabbing the real lock.  but in the success
> case here (where you incremented the percpu counter), you're enabling
> irqs and preemption.
>
> what happens if you get preempted right after this?  you've left the
> per-cpu bit set, but then you run on another cpu.

Great catch, that's a bug. It's not a problem when BPF programs call
this, as migration is disabled for them (but it's questionable whether
we should keep preemption enabled between map_inc/dec, increasing the
chances of conflicts on the same CPU), but it's certainly a problem
from the syscall path.

>
> possible alternative: instead of splitting the overall lock into "grab
> percpu lock, then grab real lock", have a single function for both,
> similar to htab_lock_bucket().  and keep irqs and preemption off from
> the moment you start attempting the overall lock until you completely
> unlock.

+1.

>
> barret
>

* Re: [PATCH v3 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps
  2024-05-16 14:34     ` Kumar Kartikeya Dwivedi
@ 2024-05-16 15:31       ` Siddharth Chintamaneni
  0 siblings, 0 replies; 12+ messages in thread
From: Siddharth Chintamaneni @ 2024-05-16 15:31 UTC
  To: Kumar Kartikeya Dwivedi
  Cc: Barret Rhoden, bpf, alexei.starovoitov, daniel, olsajiri, andrii,
	yonghong.song, rjsu26, sairoop, miloc,
	syzbot+8bdfc2c53fb2b63e1871

On Thu, 16 May 2024 at 10:34, Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>
> On Thu, 16 May 2024 at 16:05, Barret Rhoden <brho@google.com> wrote:
> >
> > On 5/14/24 08:40, Siddharth Chintamaneni wrote:
> > [...]
> > > +static inline int map_lock_inc(struct bpf_queue_stack *qs)
> > > +{
> > > +     unsigned long flags;
> > > +
> > > +     preempt_disable();
> > > +     local_irq_save(flags);
> > > +     if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
> > > +             __this_cpu_dec(*(qs->map_locked));
> > > +             local_irq_restore(flags);
> > > +             preempt_enable();
> > > +             return -EBUSY;
> > > +     }
> > > +
> > > +     local_irq_restore(flags);
> > > +     preempt_enable();
> >
> > it looks like you're taking the approach from kernel/bpf/hashtab.c to
> > use a per-cpu lock before grabbing the real lock.  but in the success
> > case here (where you incremented the percpu counter), you're enabling
> > irqs and preemption.
> >
> > what happens if you get preempted right after this?  you've left the
> > per-cpu bit set, but then you run on another cpu.
>
> Great catch, that's a bug. It's not a problem when BPF programs call
> this, as migration is disabled for them (but it's questionable whether
> we should keep preemption enabled between map_inc/dec, increasing the
> chances of conflicts on the same CPU), but it's certainly a problem
> from the syscall path.
>

I was also thinking from the BPF programs' perspective, as migration is
disabled for them. I will fix this.

> >
> > possible alternative: instead of splitting the overall lock into "grab
> > percpu lock, then grab real lock", have a single function for both,
> > similar to htab_lock_bucket().  and keep irqs and preemption off from
> > the moment you start attempting the overall lock until you completely
> > unlock.
>
> +1.
>
> >
> > barret
> >

* Re: [PATCH v3 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps
  2024-05-14 12:40 ` [PATCH v3 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps Siddharth Chintamaneni
  2024-05-15 17:32   ` Kumar Kartikeya Dwivedi
  2024-05-16 14:04   ` Barret Rhoden
@ 2024-05-17  1:53   ` Hou Tao
  2024-05-17  3:32     ` Siddharth Chintamaneni
  2 siblings, 1 reply; 12+ messages in thread
From: Hou Tao @ 2024-05-17  1:53 UTC
  To: Siddharth Chintamaneni, bpf
  Cc: alexei.starovoitov, daniel, olsajiri, andrii, yonghong.song,
	rjsu26, sairoop, miloc, memxor, syzbot+8bdfc2c53fb2b63e1871

Hi,

On 5/14/2024 8:40 PM, Siddharth Chintamaneni wrote:
> This patch is a revised version that addresses a possible deadlock in
> the queue and stack map types.
>
> A deadlock can occur when a nested BPF program tries to acquire the
> same lock as the parent BPF program in order to perform a write
> operation on the same map. This bug was also reported by syzbot.
>
> Link: https://lore.kernel.org/lkml/0000000000004c3fc90615f37756@google.com/
> Reported-by: syzbot+8bdfc2c53fb2b63e1871@syzkaller.appspotmail.com
> Fixes: f1a2e44a3aec ("bpf: add queue and stack maps")
> Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
> ---
>  kernel/bpf/queue_stack_maps.c | 76 +++++++++++++++++++++++++++++++++--
>  1 file changed, 73 insertions(+), 3 deletions(-)
>
> diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
> index d869f51ea93a..b5ed76c9ddd7 100644
> --- a/kernel/bpf/queue_stack_maps.c
> +++ b/kernel/bpf/queue_stack_maps.c
> @@ -13,11 +13,13 @@
>  #define QUEUE_STACK_CREATE_FLAG_MASK \
>  	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
>  
> +
>  struct bpf_queue_stack {
>  	struct bpf_map map;
>  	raw_spinlock_t lock;
>  	u32 head, tail;
>  	u32 size; /* max_entries + 1 */
> +	int __percpu *map_locked;
>  
>  	char elements[] __aligned(8);
>  };
> @@ -78,6 +80,15 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
>  
>  	qs->size = size;
>  
> +	qs->map_locked = bpf_map_alloc_percpu(&qs->map,
> +						sizeof(int),
> +						sizeof(int),
> +						GFP_USER | __GFP_NOWARN);
> +	if (!qs->map_locked) {
> +		bpf_map_area_free(qs);
> +		return ERR_PTR(-ENOMEM);
> +	}
> +
>  	raw_spin_lock_init(&qs->lock);
>  
>  	return &qs->map;
> @@ -88,19 +99,57 @@ static void queue_stack_map_free(struct bpf_map *map)
>  {
>  	struct bpf_queue_stack *qs = bpf_queue_stack(map);
>  
> +	free_percpu(qs->map_locked);
>  	bpf_map_area_free(qs);
>  }
>  
> +static inline int map_lock_inc(struct bpf_queue_stack *qs)
> +{
> +	unsigned long flags;
> +
> +	preempt_disable();
> +	local_irq_save(flags);
> +	if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
> +		__this_cpu_dec(*(qs->map_locked));
> +		local_irq_restore(flags);
> +		preempt_enable();
> +		return -EBUSY;
> +	}
> +
> +	local_irq_restore(flags);
> +	preempt_enable();
> +
> +	return 0;
> +}
> +
> +static inline void map_unlock_dec(struct bpf_queue_stack *qs)
> +{
> +	unsigned long flags;
> +
> +	preempt_disable();
> +	local_irq_save(flags);
> +	__this_cpu_dec(*(qs->map_locked));
> +	local_irq_restore(flags);
> +	preempt_enable();
> +}
> +
>  static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
>  {
>  	struct bpf_queue_stack *qs = bpf_queue_stack(map);
>  	unsigned long flags;
>  	int err = 0;
>  	void *ptr;
> +	int ret;
> +
> +	ret = map_lock_inc(qs);
> +	if (ret)
> +		return ret;
>  
>  	if (in_nmi()) {
> -		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
> +		if (!raw_spin_trylock_irqsave(&qs->lock, flags)) {
> +			map_unlock_dec(qs);
>  			return -EBUSY;
> +		}

With the percpu map_locked in place, I think the in_nmi() check could
also be removed. When BPF program X, which has already acquired the
lock, is interrupted by an NMI, and BPF program Y running in the NMI
also tries to acquire the same lock, it will find map_locked is 1 and
return early.
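
The locking prologue of each operation could then shrink to something
like (sketch):

	ret = map_lock_inc(qs);
	if (ret)
		return ret;

	raw_spin_lock_irqsave(&qs->lock, flags);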


* Re: [PATCH v3 bpf-next 1/2] bpf: Patch to Fix deadlocks in queue and stack maps
  2024-05-17  1:53   ` Hou Tao
@ 2024-05-17  3:32     ` Siddharth Chintamaneni
  0 siblings, 0 replies; 12+ messages in thread
From: Siddharth Chintamaneni @ 2024-05-17  3:32 UTC
  To: Hou Tao
  Cc: bpf, alexei.starovoitov, daniel, olsajiri, andrii, yonghong.song,
	rjsu26, sairoop, miloc, memxor, syzbot+8bdfc2c53fb2b63e1871

On Thu, 16 May 2024 at 21:53, Hou Tao <houtao@huaweicloud.com> wrote:
>
> Hi,
>
> On 5/14/2024 8:40 PM, Siddharth Chintamaneni wrote:
> > This patch is a revised version that addresses a possible deadlock in
> > the queue and stack map types.
> >
> > A deadlock can occur when a nested BPF program tries to acquire the
> > same lock as the parent BPF program in order to perform a write
> > operation on the same map. This bug was also reported by syzbot.
> >
> > Link: https://lore.kernel.org/lkml/0000000000004c3fc90615f37756@google.com/
> > Reported-by: syzbot+8bdfc2c53fb2b63e1871@syzkaller.appspotmail.com
> > Fixes: f1a2e44a3aec ("bpf: add queue and stack maps")
> > Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
> > ---
> >  kernel/bpf/queue_stack_maps.c | 76 +++++++++++++++++++++++++++++++++--
> >  1 file changed, 73 insertions(+), 3 deletions(-)
> >
> > diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
> > index d869f51ea93a..b5ed76c9ddd7 100644
> > --- a/kernel/bpf/queue_stack_maps.c
> > +++ b/kernel/bpf/queue_stack_maps.c
> > @@ -13,11 +13,13 @@
> >  #define QUEUE_STACK_CREATE_FLAG_MASK \
> >       (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
> >
> > +
> >  struct bpf_queue_stack {
> >       struct bpf_map map;
> >       raw_spinlock_t lock;
> >       u32 head, tail;
> >       u32 size; /* max_entries + 1 */
> > +     int __percpu *map_locked;
> >
> >       char elements[] __aligned(8);
> >  };
> > @@ -78,6 +80,15 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
> >
> >       qs->size = size;
> >
> > +     qs->map_locked = bpf_map_alloc_percpu(&qs->map,
> > +                                             sizeof(int),
> > +                                             sizeof(int),
> > +                                             GFP_USER | __GFP_NOWARN);
> > +     if (!qs->map_locked) {
> > +             bpf_map_area_free(qs);
> > +             return ERR_PTR(-ENOMEM);
> > +     }
> > +
> >       raw_spin_lock_init(&qs->lock);
> >
> >       return &qs->map;
> > @@ -88,19 +99,57 @@ static void queue_stack_map_free(struct bpf_map *map)
> >  {
> >       struct bpf_queue_stack *qs = bpf_queue_stack(map);
> >
> > +     free_percpu(qs->map_locked);
> >       bpf_map_area_free(qs);
> >  }
> >
> > +static inline int map_lock_inc(struct bpf_queue_stack *qs)
> > +{
> > +     unsigned long flags;
> > +
> > +     preempt_disable();
> > +     local_irq_save(flags);
> > +     if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
> > +             __this_cpu_dec(*(qs->map_locked));
> > +             local_irq_restore(flags);
> > +             preempt_enable();
> > +             return -EBUSY;
> > +     }
> > +
> > +     local_irq_restore(flags);
> > +     preempt_enable();
> > +
> > +     return 0;
> > +}
> > +
> > +static inline void map_unlock_dec(struct bpf_queue_stack *qs)
> > +{
> > +     unsigned long flags;
> > +
> > +     preempt_disable();
> > +     local_irq_save(flags);
> > +     __this_cpu_dec(*(qs->map_locked));
> > +     local_irq_restore(flags);
> > +     preempt_enable();
> > +}
> > +
> >  static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
> >  {
> >       struct bpf_queue_stack *qs = bpf_queue_stack(map);
> >       unsigned long flags;
> >       int err = 0;
> >       void *ptr;
> > +     int ret;
> > +
> > +     ret = map_lock_inc(qs);
> > +     if (ret)
> > +             return ret;
> >
> >       if (in_nmi()) {
> > -             if (!raw_spin_trylock_irqsave(&qs->lock, flags))
> > +             if (!raw_spin_trylock_irqsave(&qs->lock, flags)) {
> > +                     map_unlock_dec(qs);
> >                       return -EBUSY;
> > +             }
>
> With the percpu map_locked in place, I think the in_nmi() check could
> also be removed. When BPF program X, which has already acquired the
> lock, is interrupted by an NMI, and BPF program Y running in the NMI
> also tries to acquire the same lock, it will find map_locked is 1 and
> return early.

Agreed. The same thing could be done for the ringbuf as well. I will
fix this in the revision for both patches.

>
>
