BPF List
 help / color / mirror / Atom feed
From: Dave Marchevsky <davemarchevsky@fb.com>
To: <bpf@vger.kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Andrii Nakryiko <andrii@kernel.org>,
	Kernel Team <kernel-team@fb.com>,
	Dave Marchevsky <davemarchevsky@fb.com>
Subject: [RFCv2 PATCH bpf-next 17/18] selftests/bpf: Lock tracking test changes
Date: Tue, 30 Aug 2022 10:27:58 -0700	[thread overview]
Message-ID: <20220830172759.4069786-18-davemarchevsky@fb.com> (raw)
In-Reply-To: <20220830172759.4069786-1-davemarchevsky@fb.com>

This patch contains test changes corresponding to the functional changes
in "bpf: Verifier tracking of rbtree_spin_lock held". It will be
squashed with the other test patches; it is left in this state for
RFCv2 feedback.

The iter section of the rbtree_map.c prog is commented out because the
iter helpers will be tossed anyway.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 .../selftests/bpf/prog_tests/rbtree_map.c     |  2 +-
 .../testing/selftests/bpf/progs/rbtree_map.c  | 16 ++++++++-------
 .../selftests/bpf/progs/rbtree_map_fail.c     | 20 +++++++++----------
 3 files changed, 20 insertions(+), 18 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree_map.c b/tools/testing/selftests/bpf/prog_tests/rbtree_map.c
index 17cadcd05ee4..7634a2d93f0b 100644
--- a/tools/testing/selftests/bpf/prog_tests/rbtree_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/rbtree_map.c
@@ -17,7 +17,7 @@ static struct {
 	{"rb_node__field_store", "only read is supported"},
 	{"rb_node__alloc_no_add", "Unreleased reference id=2 alloc_insn=3"},
 	{"rb_node__two_alloc_one_add", "Unreleased reference id=2 alloc_insn=3"},
-	{"rb_node__remove_no_free", "Unreleased reference id=5 alloc_insn=28"},
+	{"rb_node__remove_no_free", "Unreleased reference id=6 alloc_insn=26"},
 	{"rb_tree__add_wrong_type", "rbtree: R2 is of type task_struct but node_data is expected"},
 	{"rb_tree__conditional_release_helper_usage",
 		"R2 type=ptr_cond_rel_ expected=ptr_"},
diff --git a/tools/testing/selftests/bpf/progs/rbtree_map.c b/tools/testing/selftests/bpf/progs/rbtree_map.c
index 50f29b9a5b82..957672cce82a 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_map.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_map.c
@@ -65,6 +65,7 @@ int check_rbtree(void *ctx)
 	struct node_data *node, *found, *ret;
 	struct node_data popped;
 	struct node_data search;
+	struct bpf_spin_lock *lock;
 	__u32 search2;
 
 	node = bpf_rbtree_alloc_node(&rbtree, sizeof(struct node_data));
@@ -73,7 +74,8 @@ int check_rbtree(void *ctx)
 
 	node->one = calls;
 	node->two = 6;
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	lock = &rbtree_lock;
+	bpf_rbtree_lock(lock);
 
 	ret = (struct node_data *)bpf_rbtree_add(&rbtree, node, less);
 	if (!ret) {
@@ -81,28 +83,28 @@ int check_rbtree(void *ctx)
 		goto unlock_ret;
 	}
 
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(lock);
 
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_lock(lock);
 
 	search.one = calls;
 	found = (struct node_data *)bpf_rbtree_find(&rbtree, &search, cmp);
 	if (!found)
 		goto unlock_ret;
 
-	int node_ct = 0;
+	/*int node_ct = 0;
 	struct node_data *iter = (struct node_data *)bpf_rbtree_first(&rbtree);
 
 	while (iter) {
 		node_ct++;
 		iter = (struct node_data *)bpf_rbtree_next(&rbtree, iter);
-	}
+	}*/
 
 	ret = (struct node_data *)bpf_rbtree_remove(&rbtree, found);
 	if (!ret)
 		goto unlock_ret;
 
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(lock);
 
 	bpf_rbtree_free_node(&rbtree, ret);
 
@@ -110,7 +112,7 @@ int check_rbtree(void *ctx)
 	return 0;
 
 unlock_ret:
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(&rbtree_lock);
 	return 0;
 }
 
diff --git a/tools/testing/selftests/bpf/progs/rbtree_map_fail.c b/tools/testing/selftests/bpf/progs/rbtree_map_fail.c
index ab4002a8211c..779b85294f37 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_map_fail.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_map_fail.c
@@ -61,7 +61,7 @@ int alloc_node__size_too_small(void *ctx)
 		return 0;
 	}
 
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_lock(&rbtree_lock);
 	/* will never execute, alloc_node should fail */
 	node->one = 1;
 	ret = bpf_rbtree_add(&rbtree, node, less);
@@ -71,7 +71,7 @@ int alloc_node__size_too_small(void *ctx)
 	}
 
 unlock_ret:
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(&rbtree_lock);
 	return 0;
 }
 
@@ -148,7 +148,7 @@ int rb_node__two_alloc_one_add(void *ctx)
 		return 0;
 	node->one = 42;
 
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_lock(&rbtree_lock);
 
 	ret = bpf_rbtree_add(&rbtree, node, less);
 	if (!ret) {
@@ -157,7 +157,7 @@ int rb_node__two_alloc_one_add(void *ctx)
 	}
 
 unlock_ret:
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(&rbtree_lock);
 	return 0;
 }
 
@@ -171,7 +171,7 @@ int rb_node__remove_no_free(void *ctx)
 		return 0;
 	node->one = 42;
 
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_lock(&rbtree_lock);
 
 	ret = bpf_rbtree_add(&rbtree, node, less);
 	if (!ret) {
@@ -188,7 +188,7 @@ int rb_node__remove_no_free(void *ctx)
 	 */
 
 unlock_ret:
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(&rbtree_lock);
 	return 0;
 }
 
@@ -202,14 +202,14 @@ int rb_tree__add_wrong_type(void *ctx)
 
 	task = bpf_get_current_task_btf();
 
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_lock(&rbtree_lock);
 
 	ret = bpf_rbtree_add(&rbtree, task, less);
 	/* Verifier should fail at bpf_rbtree_add, so don't bother handling
 	 * failure.
 	 */
 
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(&rbtree_lock);
 	return 0;
 }
 
@@ -223,7 +223,7 @@ int rb_tree__conditional_release_helper_usage(void *ctx)
 		return 0;
 	node->one = 42;
 
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_lock(&rbtree_lock);
 
 	ret = bpf_rbtree_add(&rbtree, node, less);
 	/* Verifier should fail when trying to use CONDITIONAL_RELEASE
@@ -236,7 +236,7 @@ int rb_tree__conditional_release_helper_usage(void *ctx)
 	}
 
 unlock_ret:
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(&rbtree_lock);
 	return 0;
 }
 
-- 
2.30.2


  parent reply	other threads:[~2022-08-30 17:51 UTC|newest]

Thread overview: 25+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-08-30 17:27 [RFCv2 PATCH bpf-next 00/18] bpf: Introduce rbtree map Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 01/18] bpf: Add verifier support for custom callback return range Dave Marchevsky
2022-09-01 21:01   ` Joanne Koong
2022-09-06 23:42     ` Dave Marchevsky
2022-09-07  1:53       ` Alexei Starovoitov
2022-09-08 21:36         ` Dave Marchevsky
2022-09-08 21:40           ` Alexei Starovoitov
2022-09-08 23:10             ` Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 02/18] bpf: Add verifier check for BPF_PTR_POISON retval and arg Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 03/18] bpf: Add rb_node_off to bpf_map Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 04/18] bpf: Add rbtree map Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 05/18] libbpf: Add support for private BSS map section Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 06/18] bpf: Add bpf_spin_lock member to rbtree Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 07/18] bpf: Add bpf_rbtree_{lock,unlock} helpers Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 08/18] bpf: Enforce spinlock hold for bpf_rbtree_{add,remove,find} Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 09/18] bpf: Support declarative association of lock with rbtree map Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 10/18] bpf: Verifier tracking of rbtree_spin_lock held Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 11/18] bpf: Check rbtree lock held during verification Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 12/18] bpf: Add OBJ_NON_OWNING_REF type flag Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 13/18] bpf: Add CONDITIONAL_RELEASE " Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 14/18] bpf: Introduce PTR_ITER and PTR_ITER_END type flags Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 15/18] selftests/bpf: Add rbtree map tests Dave Marchevsky
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 16/18] selftests/bpf: Declarative lock definition test changes Dave Marchevsky
2022-08-30 17:27 ` Dave Marchevsky [this message]
2022-08-30 17:27 ` [RFCv2 PATCH bpf-next 18/18] selftests/bpf: Rbtree static lock verification " Dave Marchevsky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220830172759.4069786-18-davemarchevsky@fb.com \
    --to=davemarchevsky@fb.com \
    --cc=andrii@kernel.org \
    --cc=ast@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=kernel-team@fb.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox