From: Kaitao cheng <kaitao.cheng@linux.dev>
To: martin.lau@linux.dev, ast@kernel.org, daniel@iogearbox.net,
andrii@kernel.org, eddyz87@gmail.com, song@kernel.org,
yonghong.song@linux.dev, john.fastabend@gmail.com,
kpsingh@kernel.org, sdf@fomichev.me, haoluo@google.com,
jolsa@kernel.org, shuah@kernel.org, chengkaitao@kylinos.cn,
linux-kselftest@vger.kernel.org
Cc: bpf@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH bpf-next v10 8/8] selftests/bpf: Add test cases for bpf_list_del/add/is_first/is_last/empty
Date: Tue, 28 Apr 2026 00:59:06 +0800 [thread overview]
Message-ID: <20260427165906.84420-9-kaitao.cheng@linux.dev> (raw)
In-Reply-To: <20260427165906.84420-1-kaitao.cheng@linux.dev>
From: Kaitao Cheng <chengkaitao@kylinos.cn>
Extend refcounted_kptr with tests for bpf_list_add (including prev from
bpf_list_front and bpf_refcount_acquire), bpf_list_del (including node
from bpf_list_front, bpf_rbtree_remove and bpf_refcount_acquire),
bpf_list_empty, bpf_list_is_first/last, and push_back on uninit head.
To verify the locking requirements of bpf_list_del/add, the tests also
expect the verifier to reject programs that call bpf_list_del/add
without holding the spin_lock.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
.../selftests/bpf/progs/refcounted_kptr.c | 421 ++++++++++++++++++
1 file changed, 421 insertions(+)
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index c847398837cc..21ae06797b18 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -367,6 +367,427 @@ long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \
INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");
+/* Check bpf_list_empty(): it must report true on the (initially empty)
+ * global list and false after one node has been pushed. All calls are
+ * made under the spin_lock protecting the list.
+ */
+SEC("tc")
+__description("list_empty_test: list empty before add, non-empty after add")
+__success __retval(0)
+int list_empty_test(void *ctx)
+{
+ struct node_data *node_new;
+
+ bpf_spin_lock(&lock);
+ if (!bpf_list_empty(&head)) {
+ bpf_spin_unlock(&lock);
+ return -1;
+ }
+ bpf_spin_unlock(&lock);
+
+ node_new = bpf_obj_new(typeof(*node_new));
+ if (!node_new)
+ return -2;
+
+ bpf_spin_lock(&lock);
+ /* Ownership of node_new passes to the list here. */
+ bpf_list_push_front(&head, &node_new->l);
+
+ if (bpf_list_empty(&head)) {
+ bpf_spin_unlock(&lock);
+ return -3;
+ }
+ bpf_spin_unlock(&lock);
+ return 0;
+}
+
+/* Allocate a node, take an extra reference with bpf_refcount_acquire(),
+ * push the node onto @head under @lock, and return the extra reference.
+ * The caller owns the returned reference and must bpf_obj_drop() it.
+ * Returns NULL if allocation fails.
+ */
+static struct node_data *__add_in_list(struct bpf_list_head *head,
+ struct bpf_spin_lock *lock)
+{
+ struct node_data *node_new, *node_ref;
+
+ node_new = bpf_obj_new(typeof(*node_new));
+ if (!node_new)
+ return NULL;
+
+ node_ref = bpf_refcount_acquire(node_new);
+
+ bpf_spin_lock(lock);
+ /* node_new's reference is consumed by the push; node_ref survives. */
+ bpf_list_push_front(head, &node_new->l);
+ bpf_spin_unlock(lock);
+ return node_ref;
+}
+
+/* Check bpf_list_is_first()/bpf_list_is_last() against nodes whose
+ * positions are known by construction: node_last is pushed to the front
+ * first, then node_first is pushed in front of it, so node_first is the
+ * head of the list and node_last its tail.
+ */
+SEC("tc")
+__description("list_is_edge_test1: is_first on first node, is_last on last node")
+__success __retval(0)
+int list_is_edge_test1(void *ctx)
+{
+ struct node_data *node_first, *node_last;
+ int err = 0;
+
+ node_last = __add_in_list(&head, &lock);
+ if (!node_last)
+ return -1;
+
+ node_first = __add_in_list(&head, &lock);
+ if (!node_first) {
+ bpf_obj_drop(node_last);
+ return -2;
+ }
+
+ bpf_spin_lock(&lock);
+ if (!bpf_list_is_first(&head, &node_first->l)) {
+ err = -3;
+ goto fail;
+ }
+ if (!bpf_list_is_last(&head, &node_last->l))
+ err = -4;
+
+fail:
+ bpf_spin_unlock(&lock);
+ /* Drop the extra references taken by __add_in_list(). */
+ bpf_obj_drop(node_first);
+ bpf_obj_drop(node_last);
+ return err;
+}
+
+/* Check that bpf_list_is_first()/bpf_list_is_last() accept the
+ * (non-owning) node pointers returned by bpf_list_front()/
+ * bpf_list_back(), and that with two nodes in the list the front node
+ * is first-but-not-last while the back node is last-but-not-first.
+ */
+SEC("tc")
+__description("list_is_edge_test2: accept list_front/list_back return value")
+__success __retval(0)
+int list_is_edge_test2(void *ctx)
+{
+ struct bpf_list_node *front, *back;
+ struct node_data *a, *b;
+ long err = 0;
+
+ a = __add_in_list(&head, &lock);
+ if (!a)
+ return -1;
+
+ b = __add_in_list(&head, &lock);
+ if (!b) {
+ bpf_obj_drop(a);
+ return -2;
+ }
+
+ bpf_spin_lock(&lock);
+ front = bpf_list_front(&head);
+ back = bpf_list_back(&head);
+ if (!front || !back) {
+ err = -3;
+ goto out_unlock;
+ }
+
+ if (!bpf_list_is_first(&head, front) || bpf_list_is_last(&head, front)) {
+ err = -4;
+ goto out_unlock;
+ }
+
+ if (!bpf_list_is_last(&head, back) || bpf_list_is_first(&head, back)) {
+ err = -5;
+ goto out_unlock;
+ }
+
+out_unlock:
+ bpf_spin_unlock(&lock);
+ /* Drop the extra references taken by __add_in_list(). */
+ bpf_obj_drop(a);
+ bpf_obj_drop(b);
+ return err;
+}
+
+/* With exactly one node in the list, that node must be reported as both
+ * the first and the last element.
+ */
+SEC("tc")
+__description("list_is_edge_test3: single node is both first and last")
+__success __retval(0)
+int list_is_edge_test3(void *ctx)
+{
+ struct node_data *tmp;
+ struct bpf_list_node *node;
+ long err = 0;
+
+ tmp = __add_in_list(&head, &lock);
+ if (!tmp)
+ return -1;
+
+ bpf_spin_lock(&lock);
+ node = bpf_list_front(&head);
+ if (!node) {
+ bpf_spin_unlock(&lock);
+ bpf_obj_drop(tmp);
+ return -2;
+ }
+
+ if (!bpf_list_is_first(&head, node) || !bpf_list_is_last(&head, node))
+ err = -3;
+ bpf_spin_unlock(&lock);
+
+ /* Drop the extra reference taken by __add_in_list(). */
+ bpf_obj_drop(tmp);
+ return err;
+}
+
+/* Delete two known nodes with bpf_list_del() (passing the non-owning
+ * pointers obtained via bpf_refcount_acquire()) and check that each
+ * call returns the removed node, whose ownership transfers to the
+ * program and is released with bpf_obj_drop().
+ */
+SEC("tc")
+__description("list_del_test1: del returns removed nodes")
+__success __retval(0)
+int list_del_test1(void *ctx)
+{
+ struct node_data *node_first, *node_last;
+ struct bpf_list_node *bpf_node_first, *bpf_node_last;
+ int err = 0;
+
+ node_last = __add_in_list(&head, &lock);
+ if (!node_last)
+ return -1;
+
+ node_first = __add_in_list(&head, &lock);
+ if (!node_first) {
+ bpf_obj_drop(node_last);
+ return -2;
+ }
+
+ bpf_spin_lock(&lock);
+ bpf_node_last = bpf_list_del(&head, &node_last->l);
+ bpf_node_first = bpf_list_del(&head, &node_first->l);
+ bpf_spin_unlock(&lock);
+
+ /* Each successful del yields an owned reference to release. */
+ if (bpf_node_first)
+ bpf_obj_drop(container_of(bpf_node_first, struct node_data, l));
+ else
+ err = -3;
+
+ if (bpf_node_last)
+ bpf_obj_drop(container_of(bpf_node_last, struct node_data, l));
+ else
+ err = -4;
+
+ /* Drop the extra references taken by __add_in_list(). */
+ bpf_obj_drop(node_first);
+ bpf_obj_drop(node_last);
+ return err;
+}
+
+/* Remove a node that sits in both an rbtree and a list: take it out of
+ * the rbtree with bpf_rbtree_remove(), then pass its list node (reached
+ * through container_of() on the rbtree result) to bpf_list_del().
+ * __insert_in_tree_and_list() is defined elsewhere in this file;
+ * presumably it links one object into both structures — see its
+ * definition.
+ */
+SEC("tc")
+__description("list_del_test2: remove an arbitrary node from the list")
+__success __retval(0)
+int list_del_test2(void *ctx)
+{
+ struct bpf_rb_node *rb;
+ struct bpf_list_node *l;
+ struct node_data *n;
+ long err;
+
+ err = __insert_in_tree_and_list(&head, &root, &lock);
+ if (err)
+ return err;
+
+ bpf_spin_lock(&lock);
+ rb = bpf_rbtree_first(&root);
+ if (!rb) {
+ bpf_spin_unlock(&lock);
+ return -4;
+ }
+
+ rb = bpf_rbtree_remove(&root, rb);
+ if (!rb) {
+ bpf_spin_unlock(&lock);
+ return -5;
+ }
+
+ n = container_of(rb, struct node_data, r);
+ l = bpf_list_del(&head, &n->l);
+ bpf_spin_unlock(&lock);
+ /* Release the reference returned by bpf_rbtree_remove(). */
+ bpf_obj_drop(n);
+ if (!l)
+ return -6;
+
+ /* Release the reference returned by bpf_list_del(). */
+ bpf_obj_drop(container_of(l, struct node_data, l));
+ return 0;
+}
+
+/* Check that bpf_list_del() accepts the non-owning node pointer
+ * returned by bpf_list_front() as its @node argument.
+ */
+SEC("tc")
+__description("list_del_test3: list_del accepts list_front return value as node")
+__success __retval(0)
+int list_del_test3(void *ctx)
+{
+ struct node_data *tmp;
+ struct bpf_list_node *bpf_node, *l;
+ long err = 0;
+
+ tmp = __add_in_list(&head, &lock);
+ if (!tmp)
+ return -1;
+
+ bpf_spin_lock(&lock);
+ bpf_node = bpf_list_front(&head);
+ if (!bpf_node) {
+ bpf_spin_unlock(&lock);
+ err = -2;
+ goto fail;
+ }
+
+ l = bpf_list_del(&head, bpf_node);
+ bpf_spin_unlock(&lock);
+ if (!l) {
+ err = -3;
+ goto fail;
+ }
+
+ /* Release the deleted node, then the __add_in_list() reference. */
+ bpf_obj_drop(container_of(l, struct node_data, l));
+ bpf_obj_drop(tmp);
+ return 0;
+
+fail:
+ bpf_obj_drop(tmp);
+ return err;
+}
+
+/* Check bpf_list_add(): insert a freshly allocated node after an
+ * existing node, passing the non-owning reference obtained via
+ * bpf_refcount_acquire() as @prev.
+ */
+SEC("tc")
+__description("list_add_test1: insert new node after prev")
+__success __retval(0)
+int list_add_test1(void *ctx)
+{
+ struct node_data *node_first;
+ struct node_data *new_node;
+ long err = 0;
+
+ node_first = __add_in_list(&head, &lock);
+ if (!node_first)
+ return -1;
+
+ new_node = bpf_obj_new(typeof(*new_node));
+ if (!new_node) {
+ err = -2;
+ goto fail;
+ }
+
+ bpf_spin_lock(&lock);
+ /* Ownership of new_node is consumed by bpf_list_add(); it is not
+ * dropped on the error path below — presumably the kfunc releases
+ * the node itself on failure (as push_front/push_back do). TODO:
+ * confirm against the bpf_list_add() implementation in this series.
+ */
+ err = bpf_list_add(&head, &new_node->l, &node_first->l);
+ bpf_spin_unlock(&lock);
+ if (err) {
+ err = -3;
+ goto fail;
+ }
+
+fail:
+ /* Drop the extra reference taken by __add_in_list(). */
+ bpf_obj_drop(node_first);
+ return err;
+}
+
+/* Check that bpf_list_add() accepts the non-owning node pointer
+ * returned by bpf_list_front() as its @prev argument.
+ */
+SEC("tc")
+__description("list_add_test2: list_add accepts list_front return value as prev")
+__success __retval(0)
+int list_add_test2(void *ctx)
+{
+ struct node_data *new_node, *tmp;
+ struct bpf_list_node *bpf_node;
+ long err = 0;
+
+ tmp = __add_in_list(&head, &lock);
+ if (!tmp)
+ return -1;
+
+ new_node = bpf_obj_new(typeof(*new_node));
+ if (!new_node) {
+ err = -2;
+ goto fail;
+ }
+
+ bpf_spin_lock(&lock);
+ bpf_node = bpf_list_front(&head);
+ if (!bpf_node) {
+ /* new_node was never handed to a kfunc; drop it explicitly. */
+ bpf_spin_unlock(&lock);
+ bpf_obj_drop(new_node);
+ err = -3;
+ goto fail;
+ }
+
+ /* Ownership of new_node is consumed by bpf_list_add(); it is not
+ * dropped on the error path below — presumably the kfunc releases
+ * the node itself on failure. TODO: confirm against the
+ * bpf_list_add() implementation in this series.
+ */
+ err = bpf_list_add(&head, &new_node->l, bpf_node);
+ bpf_spin_unlock(&lock);
+ if (err) {
+ err = -4;
+ goto fail;
+ }
+
+fail:
+ /* Drop the extra reference taken by __add_in_list(). */
+ bpf_obj_drop(tmp);
+ return err;
+}
+
+/* Map value embedding a spin_lock and a list head. Array map values
+ * start out zero-filled, so the list head in a fresh element has never
+ * been explicitly initialized — used below to exercise push_back on an
+ * all-zero head.
+ */
+struct uninit_head_val {
+ struct bpf_spin_lock lock;
+ struct bpf_list_head head __contains(node_data, l);
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct uninit_head_val);
+ __uint(max_entries, 1);
+} uninit_head_map SEC(".maps");
+
+/* Push onto a zero-initialized list head living in an array map value;
+ * bpf_list_push_back() must succeed (return 0) on such a head.
+ */
+SEC("tc")
+__description("list_push_back_uninit_head: push_back on 0-initialized list head")
+__success __retval(0)
+int list_push_back_uninit_head(void *ctx)
+{
+ struct uninit_head_val *st;
+ struct node_data *node;
+ int ret = -1, key = 0;
+
+ st = bpf_map_lookup_elem(&uninit_head_map, &key);
+ if (!st)
+ return -1;
+
+ node = bpf_obj_new(typeof(*node));
+ if (!node)
+ return -1;
+
+ bpf_spin_lock(&st->lock);
+ /* Ownership of node passes to the list; return the kfunc's result. */
+ ret = bpf_list_push_back(&st->head, &node->l);
+ bpf_spin_unlock(&st->lock);
+
+ return ret;
+}
+
+/* Negative test: calling bpf_list_del() without holding the associated
+ * spin_lock must be rejected by the verifier with the __msg text below;
+ * the program body after the kfunc call is never executed.
+ */
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=32 must be held for bpf_list_head")
+long list_del_without_lock_fail(void *ctx)
+{
+ struct node_data *n;
+ struct bpf_list_node *l;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return -1;
+
+ /* Error case: delete list node without holding lock */
+ l = bpf_list_del(&head, &n->l);
+ bpf_obj_drop(n);
+ if (!l)
+ return -2;
+ bpf_obj_drop(container_of(l, struct node_data, l));
+
+ return 0;
+}
+
+/* Negative test: calling bpf_list_add() without holding the associated
+ * spin_lock must be rejected by the verifier with the __msg text below;
+ * the program body after the kfunc call is never executed.
+ */
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=32 must be held for bpf_list_head")
+long list_add_without_lock_fail(void *ctx)
+{
+ struct node_data *n, *prev;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return -1;
+
+ prev = bpf_obj_new(typeof(*prev));
+ if (!prev) {
+ bpf_obj_drop(n);
+ return -1;
+ }
+
+ /* Error case: add list node without holding lock */
+ if (bpf_list_add(&head, &n->l, &prev->l)) {
+ bpf_obj_drop(prev);
+ bpf_obj_drop(n);
+ return -2;
+ }
+
+ return 0;
+}
+
SEC("tc")
__success
long rbtree_refcounted_node_ref_escapes(void *ctx)
--
2.50.1 (Apple Git-155)
prev parent reply other threads:[~2026-04-27 17:00 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-27 16:58 [PATCH bpf-next v10 0/8] bpf: Extend the bpf_list family of APIs Kaitao cheng
2026-04-27 16:58 ` [PATCH bpf-next v10 1/8] bpf: refactor __bpf_list_del to take list node pointer Kaitao cheng
2026-04-27 18:43 ` bot+bpf-ci
2026-04-28 13:52 ` Kaitao Cheng
2026-04-27 16:59 ` [PATCH bpf-next v10 2/8] bpf: clear list node owner and unlink before drop Kaitao cheng
2026-04-27 18:43 ` bot+bpf-ci
2026-04-28 14:08 ` Kaitao Cheng
2026-04-27 16:59 ` [PATCH bpf-next v10 3/8] bpf: Introduce the bpf_list_del kfunc Kaitao cheng
2026-04-27 18:43 ` bot+bpf-ci
2026-04-28 14:32 ` Kaitao Cheng
2026-04-27 16:59 ` [PATCH bpf-next v10 4/8] bpf: refactor __bpf_list_add to take insertion point via **prev_ptr Kaitao cheng
2026-04-27 16:59 ` [PATCH bpf-next v10 5/8] bpf: Add bpf_list_add to insert node after a given list node Kaitao cheng
2026-04-27 18:43 ` bot+bpf-ci
2026-04-28 14:18 ` Kaitao Cheng
2026-04-27 16:59 ` [PATCH bpf-next v10 6/8] bpf: add bpf_list_is_first/last/empty kfuncs Kaitao cheng
2026-04-27 16:59 ` [PATCH bpf-next v10 7/8] bpf: allow non-owning list-node args via __nonown_allowed Kaitao cheng
2026-04-27 16:59 ` Kaitao cheng [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260427165906.84420-9-kaitao.cheng@linux.dev \
--to=kaitao.cheng@linux.dev \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=chengkaitao@kylinos.cn \
--cc=daniel@iogearbox.net \
--cc=eddyz87@gmail.com \
--cc=haoluo@google.com \
--cc=john.fastabend@gmail.com \
--cc=jolsa@kernel.org \
--cc=kpsingh@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=martin.lau@linux.dev \
--cc=sdf@fomichev.me \
--cc=shuah@kernel.org \
--cc=song@kernel.org \
--cc=yonghong.song@linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox