* [PATCH bpf-next v9 1/9] bpf: refactor kfunc checks using table-driven approach in verifier
2026-03-29 14:04 [PATCH bpf-next v9 0/9] bpf: Extend the bpf_list family of APIs Chengkaitao
@ 2026-03-29 14:04 ` Chengkaitao
2026-03-30 15:20 ` Mykyta Yatsenko
2026-03-30 17:05 ` Alexei Starovoitov
2026-03-29 14:04 ` [PATCH bpf-next v9 2/9] bpf: refactor __bpf_list_del to take list node pointer Chengkaitao
` (7 subsequent siblings)
8 siblings, 2 replies; 16+ messages in thread
From: Chengkaitao @ 2026-03-29 14:04 UTC (permalink / raw)
To: martin.lau, ast, daniel, andrii, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, chengkaitao,
linux-kselftest
Cc: bpf, linux-kernel
From: Kaitao Cheng <chengkaitao@kylinos.cn>
Replace per-kfunc btf_id chains check with btf_id_in_kfunc_table() and
static kfunc tables for easier maintenance.
Prepare for future extensions to the bpf_list API family.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
kernel/bpf/verifier.c | 261 +++++++++++++++++++++++-------------------
1 file changed, 144 insertions(+), 117 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4fbacd2149cd..f2d9863bb290 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -544,9 +544,6 @@ static bool is_async_callback_calling_kfunc(u32 btf_id);
static bool is_callback_calling_kfunc(u32 btf_id);
static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
-static bool is_bpf_wq_set_callback_kfunc(u32 btf_id);
-static bool is_task_work_add_kfunc(u32 func_id);
-
static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_for_each_map_elem ||
@@ -586,7 +583,7 @@ static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn
/* bpf_wq and bpf_task_work callbacks are always sleepable. */
if (bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
- (is_bpf_wq_set_callback_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm)))
+ is_async_callback_calling_kfunc(insn->imm))
return true;
verifier_bug(env, "unhandled async callback in is_async_cb_sleepable");
@@ -11203,31 +11200,6 @@ static int set_task_work_schedule_callback_state(struct bpf_verifier_env *env,
return 0;
}
-static bool is_rbtree_lock_required_kfunc(u32 btf_id);
-
-/* Are we currently verifying the callback for a rbtree helper that must
- * be called with lock held? If so, no need to complain about unreleased
- * lock
- */
-static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
-{
- struct bpf_verifier_state *state = env->cur_state;
- struct bpf_insn *insn = env->prog->insnsi;
- struct bpf_func_state *callee;
- int kfunc_btf_id;
-
- if (!state->curframe)
- return false;
-
- callee = state->frame[state->curframe];
-
- if (!callee->in_callback_fn)
- return false;
-
- kfunc_btf_id = insn[callee->callsite].imm;
- return is_rbtree_lock_required_kfunc(kfunc_btf_id);
-}
-
static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
{
if (range.return_32bit)
@@ -12639,11 +12611,103 @@ BTF_ID(func, bpf_session_is_return)
BTF_ID(func, bpf_stream_vprintk)
BTF_ID(func, bpf_stream_print_stack)
-static bool is_task_work_add_kfunc(u32 func_id)
-{
- return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
- func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume];
-}
+/* Kfunc family related to list. */
+static const enum special_kfunc_type bpf_list_api_kfuncs[] = {
+ KF_bpf_list_push_front_impl,
+ KF_bpf_list_push_back_impl,
+ KF_bpf_list_pop_front,
+ KF_bpf_list_pop_back,
+ KF_bpf_list_front,
+ KF_bpf_list_back,
+};
+
+/* Kfuncs that take a list node argument (bpf_list_node *). */
+static const enum special_kfunc_type bpf_list_node_api_kfuncs[] = {
+ KF_bpf_list_push_front_impl,
+ KF_bpf_list_push_back_impl,
+};
+
+/* Kfuncs that take an rbtree node argument (bpf_rb_node *). */
+static const enum special_kfunc_type bpf_rbtree_node_api_kfuncs[] = {
+ KF_bpf_rbtree_remove,
+ KF_bpf_rbtree_add_impl,
+ KF_bpf_rbtree_left,
+ KF_bpf_rbtree_right,
+};
+
+/* Kfunc family related to rbtree. */
+static const enum special_kfunc_type bpf_rbtree_api_kfuncs[] = {
+ KF_bpf_rbtree_add_impl,
+ KF_bpf_rbtree_remove,
+ KF_bpf_rbtree_first,
+ KF_bpf_rbtree_root,
+ KF_bpf_rbtree_left,
+ KF_bpf_rbtree_right,
+};
+
+/* Kfunc family related to spin_lock. */
+static const enum special_kfunc_type bpf_res_spin_lock_api_kfuncs[] = {
+ KF_bpf_res_spin_lock,
+ KF_bpf_res_spin_unlock,
+ KF_bpf_res_spin_lock_irqsave,
+ KF_bpf_res_spin_unlock_irqrestore,
+};
+
+/* Kfunc family related to iter_num. */
+static const enum special_kfunc_type bpf_iter_num_api_kfuncs[] = {
+ KF_bpf_iter_num_new,
+ KF_bpf_iter_num_next,
+ KF_bpf_iter_num_destroy,
+};
+
+/* Kfunc family related to arena. */
+static const enum special_kfunc_type bpf_arena_api_kfuncs[] = {
+ KF_bpf_arena_alloc_pages,
+ KF_bpf_arena_free_pages,
+ KF_bpf_arena_reserve_pages,
+};
+
+/* Kfunc family related to stream. */
+static const enum special_kfunc_type bpf_stream_api_kfuncs[] = {
+ KF_bpf_stream_vprintk,
+ KF_bpf_stream_print_stack,
+};
+
+/* Kfuncs that must be called when inserting a node in list/rbtree. */
+static const enum special_kfunc_type bpf_collection_insert_kfuncs[] = {
+ KF_bpf_list_push_front_impl,
+ KF_bpf_list_push_back_impl,
+ KF_bpf_rbtree_add_impl,
+};
+
+/* KF_ACQUIRE kfuncs whose vmlinux BTF return type is void* */
+static const enum special_kfunc_type bpf_obj_acquire_ptr_kfuncs[] = {
+ KF_bpf_obj_new_impl,
+ KF_bpf_percpu_obj_new_impl,
+ KF_bpf_refcount_acquire_impl,
+};
+
+/* Kfunc family related to task_work. */
+static const enum special_kfunc_type bpf_task_work_api_kfuncs[] = {
+ KF_bpf_task_work_schedule_signal,
+ KF_bpf_task_work_schedule_resume,
+};
+
+/* __kfuncs must be an array identifier (not a pointer), for ARRAY_SIZE. */
+#define btf_id_in_kfunc_table(__btf_id, __kfuncs) \
+ ({ \
+ u32 ___id = (__btf_id); \
+ unsigned int ___i; \
+ bool ___found = false; \
+ \
+ for (___i = 0; ___i < ARRAY_SIZE(__kfuncs); ___i++) { \
+ if (___id == special_kfunc_list[(__kfuncs)[___i]]) { \
+ ___found = true; \
+ break; \
+ } \
+ } \
+ ___found; \
+ })
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
{
@@ -12680,6 +12744,29 @@ static bool is_kfunc_pkt_changing(struct bpf_kfunc_call_arg_meta *meta)
return meta->func_id == special_kfunc_list[KF_bpf_xdp_pull_data];
}
+/* Are we currently verifying the callback for a rbtree helper that must
+ * be called with lock held? If so, no need to complain about unreleased
+ * lock
+ */
+static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
+{
+ struct bpf_verifier_state *state = env->cur_state;
+ struct bpf_insn *insn = env->prog->insnsi;
+ struct bpf_func_state *callee;
+ int kfunc_btf_id;
+
+ if (!state->curframe)
+ return false;
+
+ callee = state->frame[state->curframe];
+
+ if (!callee->in_callback_fn)
+ return false;
+
+ kfunc_btf_id = insn[callee->callsite].imm;
+ return btf_id_in_kfunc_table(kfunc_btf_id, bpf_rbtree_api_kfuncs);
+}
+
static enum kfunc_ptr_arg_type
get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
struct bpf_kfunc_call_arg_meta *meta,
@@ -13036,65 +13123,20 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_
return 0;
}
-static bool is_bpf_list_api_kfunc(u32 btf_id)
-{
- return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
- btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
- btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
- btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
- btf_id == special_kfunc_list[KF_bpf_list_front] ||
- btf_id == special_kfunc_list[KF_bpf_list_back];
-}
-
-static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
-{
- return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_first] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_root] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_right];
-}
-
-static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
-{
- return btf_id == special_kfunc_list[KF_bpf_iter_num_new] ||
- btf_id == special_kfunc_list[KF_bpf_iter_num_next] ||
- btf_id == special_kfunc_list[KF_bpf_iter_num_destroy];
-}
-
static bool is_bpf_graph_api_kfunc(u32 btf_id)
{
- return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) ||
+ return btf_id_in_kfunc_table(btf_id, bpf_list_api_kfuncs) ||
+ btf_id_in_kfunc_table(btf_id, bpf_rbtree_api_kfuncs) ||
btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
}
-static bool is_bpf_res_spin_lock_kfunc(u32 btf_id)
-{
- return btf_id == special_kfunc_list[KF_bpf_res_spin_lock] ||
- btf_id == special_kfunc_list[KF_bpf_res_spin_unlock] ||
- btf_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave] ||
- btf_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore];
-}
-
-static bool is_bpf_arena_kfunc(u32 btf_id)
-{
- return btf_id == special_kfunc_list[KF_bpf_arena_alloc_pages] ||
- btf_id == special_kfunc_list[KF_bpf_arena_free_pages] ||
- btf_id == special_kfunc_list[KF_bpf_arena_reserve_pages];
-}
-
-static bool is_bpf_stream_kfunc(u32 btf_id)
-{
- return btf_id == special_kfunc_list[KF_bpf_stream_vprintk] ||
- btf_id == special_kfunc_list[KF_bpf_stream_print_stack];
-}
-
static bool kfunc_spin_allowed(u32 btf_id)
{
- return is_bpf_graph_api_kfunc(btf_id) || is_bpf_iter_num_api_kfunc(btf_id) ||
- is_bpf_res_spin_lock_kfunc(btf_id) || is_bpf_arena_kfunc(btf_id) ||
- is_bpf_stream_kfunc(btf_id);
+ return is_bpf_graph_api_kfunc(btf_id) ||
+ btf_id_in_kfunc_table(btf_id, bpf_iter_num_api_kfuncs) ||
+ btf_id_in_kfunc_table(btf_id, bpf_res_spin_lock_api_kfuncs) ||
+ btf_id_in_kfunc_table(btf_id, bpf_arena_api_kfuncs) ||
+ btf_id_in_kfunc_table(btf_id, bpf_stream_api_kfuncs);
}
static bool is_sync_callback_calling_kfunc(u32 btf_id)
@@ -13102,12 +13144,6 @@ static bool is_sync_callback_calling_kfunc(u32 btf_id)
return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
}
-static bool is_async_callback_calling_kfunc(u32 btf_id)
-{
- return is_bpf_wq_set_callback_kfunc(btf_id) ||
- is_task_work_add_kfunc(btf_id);
-}
-
static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
{
return bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
@@ -13119,15 +13155,16 @@ static bool is_bpf_wq_set_callback_kfunc(u32 btf_id)
return btf_id == special_kfunc_list[KF_bpf_wq_set_callback];
}
-static bool is_callback_calling_kfunc(u32 btf_id)
+static bool is_async_callback_calling_kfunc(u32 btf_id)
{
- return is_sync_callback_calling_kfunc(btf_id) ||
- is_async_callback_calling_kfunc(btf_id);
+ return is_bpf_wq_set_callback_kfunc(btf_id) ||
+ btf_id_in_kfunc_table(btf_id, bpf_task_work_api_kfuncs);
}
-static bool is_rbtree_lock_required_kfunc(u32 btf_id)
+static bool is_callback_calling_kfunc(u32 btf_id)
{
- return is_bpf_rbtree_api_kfunc(btf_id);
+ return is_sync_callback_calling_kfunc(btf_id) ||
+ is_async_callback_calling_kfunc(btf_id);
}
static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
@@ -13138,10 +13175,10 @@ static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
switch (head_field_type) {
case BPF_LIST_HEAD:
- ret = is_bpf_list_api_kfunc(kfunc_btf_id);
+ ret = btf_id_in_kfunc_table(kfunc_btf_id, bpf_list_api_kfuncs);
break;
case BPF_RB_ROOT:
- ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id);
+ ret = btf_id_in_kfunc_table(kfunc_btf_id, bpf_rbtree_api_kfuncs);
break;
default:
verbose(env, "verifier internal error: unexpected graph root argument type %s\n",
@@ -13163,14 +13200,10 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
switch (node_field_type) {
case BPF_LIST_NODE:
- ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl]);
+ ret = btf_id_in_kfunc_table(kfunc_btf_id, bpf_list_node_api_kfuncs);
break;
case BPF_RB_NODE:
- ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_right]);
+ ret = btf_id_in_kfunc_table(kfunc_btf_id, bpf_rbtree_node_api_kfuncs);
break;
default:
verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
@@ -13878,7 +13911,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return -EINVAL;
}
- if (!is_bpf_res_spin_lock_kfunc(meta->func_id))
+ if (!btf_id_in_kfunc_table(meta->func_id, bpf_res_spin_lock_api_kfuncs))
return -EFAULT;
if (meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock] ||
meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave])
@@ -14215,7 +14248,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
}
}
- if (is_task_work_add_kfunc(meta.func_id)) {
+ if (btf_id_in_kfunc_table(meta.func_id, bpf_task_work_api_kfuncs)) {
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
set_task_work_schedule_callback_state);
if (err) {
@@ -14304,9 +14337,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return err;
}
- if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
- meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
- meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+ if (btf_id_in_kfunc_table(meta.func_id, bpf_collection_insert_kfuncs)) {
release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
insn_aux->insert_off = regs[BPF_REG_2].var_off.value;
insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id);
@@ -14354,11 +14385,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL);
if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) {
- /* Only exception is bpf_obj_new_impl */
+ /* Only exception is bpf_obj_acquire_ptr_kfuncs */
if (meta.btf != btf_vmlinux ||
- (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] &&
- meta.func_id != special_kfunc_list[KF_bpf_percpu_obj_new_impl] &&
- meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) {
+ !btf_id_in_kfunc_table(meta.func_id, bpf_obj_acquire_ptr_kfuncs)) {
verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
return -EINVAL;
}
@@ -23316,9 +23345,7 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
insn_buf[1] = addr[1];
insn_buf[2] = *insn;
*cnt = 3;
- } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
- desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
- desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+ } else if (btf_id_in_kfunc_table(desc->func_id, bpf_collection_insert_kfuncs)) {
struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
int struct_meta_reg = BPF_REG_3;
int node_offset_reg = BPF_REG_4;
--
2.50.1 (Apple Git-155)
^ permalink raw reply related [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v9 1/9] bpf: refactor kfunc checks using table-driven approach in verifier
2026-03-29 14:04 ` [PATCH bpf-next v9 1/9] bpf: refactor kfunc checks using table-driven approach in verifier Chengkaitao
@ 2026-03-30 15:20 ` Mykyta Yatsenko
2026-03-30 17:05 ` Alexei Starovoitov
1 sibling, 0 replies; 16+ messages in thread
From: Mykyta Yatsenko @ 2026-03-30 15:20 UTC (permalink / raw)
To: Chengkaitao, martin.lau, ast, daniel, andrii, eddyz87, song,
yonghong.song, john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah,
chengkaitao, linux-kselftest
Cc: bpf, linux-kernel
Chengkaitao <pilgrimtao@gmail.com> writes:
> From: Kaitao Cheng <chengkaitao@kylinos.cn>
>
> Replace per-kfunc btf_id chains check with btf_id_in_kfunc_table() and
> static kfunc tables for easier maintenance.
>
> Prepare for future extensions to the bpf_list API family.
>
> Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
> ---
I think old is_bpf_rbtree_api_kfunc(kfunc_btf_id) is more readable than new
btf_id_in_kfunc_table(kfunc_btf_id, bpf_rbtree_api_kfuncs);
Not sure if this change should be bundled with this series; it looks
quite orthogonal.
> kernel/bpf/verifier.c | 261 +++++++++++++++++++++++-------------------
> 1 file changed, 144 insertions(+), 117 deletions(-)
>
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 4fbacd2149cd..f2d9863bb290 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -544,9 +544,6 @@ static bool is_async_callback_calling_kfunc(u32 btf_id);
> static bool is_callback_calling_kfunc(u32 btf_id);
> static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
>
> -static bool is_bpf_wq_set_callback_kfunc(u32 btf_id);
> -static bool is_task_work_add_kfunc(u32 func_id);
> -
> static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
> {
> return func_id == BPF_FUNC_for_each_map_elem ||
> @@ -586,7 +583,7 @@ static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn
>
> /* bpf_wq and bpf_task_work callbacks are always sleepable. */
> if (bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
> - (is_bpf_wq_set_callback_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm)))
> + is_async_callback_calling_kfunc(insn->imm))
> return true;
>
> verifier_bug(env, "unhandled async callback in is_async_cb_sleepable");
> @@ -11203,31 +11200,6 @@ static int set_task_work_schedule_callback_state(struct bpf_verifier_env *env,
> return 0;
> }
>
> ...
> --
> 2.50.1 (Apple Git-155)
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v9 1/9] bpf: refactor kfunc checks using table-driven approach in verifier
2026-03-29 14:04 ` [PATCH bpf-next v9 1/9] bpf: refactor kfunc checks using table-driven approach in verifier Chengkaitao
2026-03-30 15:20 ` Mykyta Yatsenko
@ 2026-03-30 17:05 ` Alexei Starovoitov
2026-04-03 17:41 ` Chengkaitao
1 sibling, 1 reply; 16+ messages in thread
From: Alexei Starovoitov @ 2026-03-30 17:05 UTC (permalink / raw)
To: Chengkaitao
Cc: Martin KaFai Lau, Alexei Starovoitov, Daniel Borkmann,
Andrii Nakryiko, Eduard, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, Shuah Khan,
Chengkaitao, open list:KERNEL SELFTEST FRAMEWORK, bpf, LKML
On Sun, Mar 29, 2026 at 7:05 AM Chengkaitao <pilgrimtao@gmail.com> wrote:
>
> From: Kaitao Cheng <chengkaitao@kylinos.cn>
>
> Replace per-kfunc btf_id chains check with btf_id_in_kfunc_table() and
> static kfunc tables for easier maintenance.
>
> Prepare for future extensions to the bpf_list API family.
>
> Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
> ---
> kernel/bpf/verifier.c | 261 +++++++++++++++++++++++-------------------
> 1 file changed, 144 insertions(+), 117 deletions(-)
>
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 4fbacd2149cd..f2d9863bb290 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -544,9 +544,6 @@ static bool is_async_callback_calling_kfunc(u32 btf_id);
> static bool is_callback_calling_kfunc(u32 btf_id);
> static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
>
> -static bool is_bpf_wq_set_callback_kfunc(u32 btf_id);
> -static bool is_task_work_add_kfunc(u32 func_id);
> -
> static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
> {
> return func_id == BPF_FUNC_for_each_map_elem ||
> @@ -586,7 +583,7 @@ static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn
>
> /* bpf_wq and bpf_task_work callbacks are always sleepable. */
> if (bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
> - (is_bpf_wq_set_callback_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm)))
> + is_async_callback_calling_kfunc(insn->imm))
> return true;
>
> verifier_bug(env, "unhandled async callback in is_async_cb_sleepable");
> @@ -11203,31 +11200,6 @@ static int set_task_work_schedule_callback_state(struct bpf_verifier_env *env,
> return 0;
> }
>
> -static bool is_rbtree_lock_required_kfunc(u32 btf_id);
> -
> -/* Are we currently verifying the callback for a rbtree helper that must
> - * be called with lock held? If so, no need to complain about unreleased
> - * lock
> - */
> -static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
> -{
> - struct bpf_verifier_state *state = env->cur_state;
> - struct bpf_insn *insn = env->prog->insnsi;
> - struct bpf_func_state *callee;
> - int kfunc_btf_id;
> -
> - if (!state->curframe)
> - return false;
> -
> - callee = state->frame[state->curframe];
> -
> - if (!callee->in_callback_fn)
> - return false;
> -
> - kfunc_btf_id = insn[callee->callsite].imm;
> - return is_rbtree_lock_required_kfunc(kfunc_btf_id);
> -}
> -
> static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
> {
> if (range.return_32bit)
> @@ -12639,11 +12611,103 @@ BTF_ID(func, bpf_session_is_return)
> BTF_ID(func, bpf_stream_vprintk)
> BTF_ID(func, bpf_stream_print_stack)
>
> -static bool is_task_work_add_kfunc(u32 func_id)
> -{
> - return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
> - func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume];
> -}
> +/* Kfunc family related to list. */
> +static const enum special_kfunc_type bpf_list_api_kfuncs[] = {
> + KF_bpf_list_push_front_impl,
> + KF_bpf_list_push_back_impl,
> + KF_bpf_list_pop_front,
> + KF_bpf_list_pop_back,
> + KF_bpf_list_front,
> + KF_bpf_list_back,
> +};
> +
> +/* Kfuncs that take a list node argument (bpf_list_node *). */
> +static const enum special_kfunc_type bpf_list_node_api_kfuncs[] = {
> + KF_bpf_list_push_front_impl,
> + KF_bpf_list_push_back_impl,
> +};
> +
> +/* Kfuncs that take an rbtree node argument (bpf_rb_node *). */
> +static const enum special_kfunc_type bpf_rbtree_node_api_kfuncs[] = {
> + KF_bpf_rbtree_remove,
> + KF_bpf_rbtree_add_impl,
> + KF_bpf_rbtree_left,
> + KF_bpf_rbtree_right,
> +};
> +
> +/* Kfunc family related to rbtree. */
> +static const enum special_kfunc_type bpf_rbtree_api_kfuncs[] = {
> + KF_bpf_rbtree_add_impl,
> + KF_bpf_rbtree_remove,
> + KF_bpf_rbtree_first,
> + KF_bpf_rbtree_root,
> + KF_bpf_rbtree_left,
> + KF_bpf_rbtree_right,
> +};
> +
> +/* Kfunc family related to spin_lock. */
> +static const enum special_kfunc_type bpf_res_spin_lock_api_kfuncs[] = {
> + KF_bpf_res_spin_lock,
> + KF_bpf_res_spin_unlock,
> + KF_bpf_res_spin_lock_irqsave,
> + KF_bpf_res_spin_unlock_irqrestore,
> +};
I think it's a step in the wrong direction.
I'd wait for Ihor's BTF_ID_NAMED cleanup.
Kaitao Cheng,
also please start your part of code reviews.
Your patches are not going to be landing if you don't code review.
https://lore.kernel.org/bpf/CAADnVQ+TKKptnNB25V3=bcdybh5G6c2DyW2sYtXvyRaVnPN8MA@mail.gmail.com/
pw-bot: cr
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v9 1/9] bpf: refactor kfunc checks using table-driven approach in verifier
2026-03-30 17:05 ` Alexei Starovoitov
@ 2026-04-03 17:41 ` Chengkaitao
2026-04-04 4:49 ` Ihor Solodrai
0 siblings, 1 reply; 16+ messages in thread
From: Chengkaitao @ 2026-04-03 17:41 UTC (permalink / raw)
To: Alexei Starovoitov
Cc: Martin KaFai Lau, Alexei Starovoitov, Daniel Borkmann,
Andrii Nakryiko, Eduard, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, Shuah Khan,
Chengkaitao, open list:KERNEL SELFTEST FRAMEWORK, bpf, LKML,
ihor.solodrai
On Tue, Mar 31, 2026 at 1:05 AM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Sun, Mar 29, 2026 at 7:05 AM Chengkaitao <pilgrimtao@gmail.com> wrote:
> >
> > From: Kaitao Cheng <chengkaitao@kylinos.cn>
> >
> > Replace per-kfunc btf_id chains check with btf_id_in_kfunc_table() and
> > static kfunc tables for easier maintenance.
> >
> > Prepare for future extensions to the bpf_list API family.
> >
> > Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
> > ---
> > kernel/bpf/verifier.c | 261 +++++++++++++++++++++++-------------------
> > 1 file changed, 144 insertions(+), 117 deletions(-)
> >
> > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > index 4fbacd2149cd..f2d9863bb290 100644
> > --- a/kernel/bpf/verifier.c
> > +++ b/kernel/bpf/verifier.c
> > @@ -544,9 +544,6 @@ static bool is_async_callback_calling_kfunc(u32 btf_id);
> > static bool is_callback_calling_kfunc(u32 btf_id);
> > static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
> >
> > -static bool is_bpf_wq_set_callback_kfunc(u32 btf_id);
> > -static bool is_task_work_add_kfunc(u32 func_id);
> > -
> > static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
> > {
> > return func_id == BPF_FUNC_for_each_map_elem ||
> > @@ -586,7 +583,7 @@ static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn
> >
> > /* bpf_wq and bpf_task_work callbacks are always sleepable. */
> > if (bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
> > - (is_bpf_wq_set_callback_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm)))
> > + is_async_callback_calling_kfunc(insn->imm))
> > return true;
> >
> > verifier_bug(env, "unhandled async callback in is_async_cb_sleepable");
> > @@ -11203,31 +11200,6 @@ static int set_task_work_schedule_callback_state(struct bpf_verifier_env *env,
> > return 0;
> > }
> >
> > -static bool is_rbtree_lock_required_kfunc(u32 btf_id);
> > -
> > -/* Are we currently verifying the callback for a rbtree helper that must
> > - * be called with lock held? If so, no need to complain about unreleased
> > - * lock
> > - */
> > -static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
> > -{
> > - struct bpf_verifier_state *state = env->cur_state;
> > - struct bpf_insn *insn = env->prog->insnsi;
> > - struct bpf_func_state *callee;
> > - int kfunc_btf_id;
> > -
> > - if (!state->curframe)
> > - return false;
> > -
> > - callee = state->frame[state->curframe];
> > -
> > - if (!callee->in_callback_fn)
> > - return false;
> > -
> > - kfunc_btf_id = insn[callee->callsite].imm;
> > - return is_rbtree_lock_required_kfunc(kfunc_btf_id);
> > -}
> > -
> > static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
> > {
> > if (range.return_32bit)
> > @@ -12639,11 +12611,103 @@ BTF_ID(func, bpf_session_is_return)
> > BTF_ID(func, bpf_stream_vprintk)
> > BTF_ID(func, bpf_stream_print_stack)
> >
> > -static bool is_task_work_add_kfunc(u32 func_id)
> > -{
> > - return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
> > - func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume];
> > -}
> > +/* Kfunc family related to list. */
> > +static const enum special_kfunc_type bpf_list_api_kfuncs[] = {
> > + KF_bpf_list_push_front_impl,
> > + KF_bpf_list_push_back_impl,
> > + KF_bpf_list_pop_front,
> > + KF_bpf_list_pop_back,
> > + KF_bpf_list_front,
> > + KF_bpf_list_back,
> > +};
> > +
> > +/* Kfuncs that take a list node argument (bpf_list_node *). */
> > +static const enum special_kfunc_type bpf_list_node_api_kfuncs[] = {
> > + KF_bpf_list_push_front_impl,
> > + KF_bpf_list_push_back_impl,
> > +};
> > +
> > +/* Kfuncs that take an rbtree node argument (bpf_rb_node *). */
> > +static const enum special_kfunc_type bpf_rbtree_node_api_kfuncs[] = {
> > + KF_bpf_rbtree_remove,
> > + KF_bpf_rbtree_add_impl,
> > + KF_bpf_rbtree_left,
> > + KF_bpf_rbtree_right,
> > +};
> > +
> > +/* Kfunc family related to rbtree. */
> > +static const enum special_kfunc_type bpf_rbtree_api_kfuncs[] = {
> > + KF_bpf_rbtree_add_impl,
> > + KF_bpf_rbtree_remove,
> > + KF_bpf_rbtree_first,
> > + KF_bpf_rbtree_root,
> > + KF_bpf_rbtree_left,
> > + KF_bpf_rbtree_right,
> > +};
> > +
> > +/* Kfunc family related to spin_lock. */
> > +static const enum special_kfunc_type bpf_res_spin_lock_api_kfuncs[] = {
> > + KF_bpf_res_spin_lock,
> > + KF_bpf_res_spin_unlock,
> > + KF_bpf_res_spin_lock_irqsave,
> > + KF_bpf_res_spin_unlock_irqrestore,
> > +};
>
> I think it's a step in the wrong direction.
> I'd wait for Ihor's BTF_ID_NAMED cleanup.
After reading Ihor's messages on the list, if I understand correctly,
our two approaches seem to target different problems. What Ihor's
work appears to achieve is the ability to remove the entire enum
special_kfunc_type. My goal, on the other hand, is to replace many
scattered func_id == special_kfunc_list[...] comparisons with a
table-driven approach.
That said, once Ihor's solution lands, my approach would no longer
be applicable as-is. So I have been thinking about an alternative,
please see the patch linked below for details:
https://lore.kernel.org/bpf/20260403170900.58659-1-pilgrimtao@gmail.com/
> Kaitao Cheng,
>
> also please start your part of code reviews.
> Your patches are not going to be landing if you don't code review.
>
> https://lore.kernel.org/bpf/CAADnVQ+TKKptnNB25V3=bcdybh5G6c2DyW2sYtXvyRaVnPN8MA@mail.gmail.com/
I will do my best to carry this forward.
--
Yours,
Chengkaitao
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v9 1/9] bpf: refactor kfunc checks using table-driven approach in verifier
2026-04-03 17:41 ` Chengkaitao
@ 2026-04-04 4:49 ` Ihor Solodrai
2026-04-04 10:38 ` Chengkaitao
0 siblings, 1 reply; 16+ messages in thread
From: Ihor Solodrai @ 2026-04-04 4:49 UTC (permalink / raw)
To: Chengkaitao, Alexei Starovoitov
Cc: Martin KaFai Lau, Alexei Starovoitov, Daniel Borkmann,
Andrii Nakryiko, Eduard, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, Shuah Khan,
Chengkaitao, open list:KERNEL SELFTEST FRAMEWORK, bpf, LKML
On 4/3/26 10:41 AM, Chengkaitao wrote:
> On Tue, Mar 31, 2026 at 1:05 AM Alexei Starovoitov
> <alexei.starovoitov@gmail.com> wrote:
>>
>> On Sun, Mar 29, 2026 at 7:05 AM Chengkaitao <pilgrimtao@gmail.com> wrote:
>>>
>>> From: Kaitao Cheng <chengkaitao@kylinos.cn>
>>>
>>> [...]
>>> +
>>> +/* Kfunc family related to spin_lock. */
>>> +static const enum special_kfunc_type bpf_res_spin_lock_api_kfuncs[] = {
>>> + KF_bpf_res_spin_lock,
>>> + KF_bpf_res_spin_unlock,
>>> + KF_bpf_res_spin_lock_irqsave,
>>> + KF_bpf_res_spin_unlock_irqrestore,
>>> +};
>>
>> I think it's a step in the wrong direction.
>> I'd wait for Ihor's BTF_ID_NAMED cleanup.
>
> After reading Ihor's messages on the list, if I understand correctly,
> our two approaches seem to target different problems. What Ihor's
> work appears to achieve is the ability to remove the entire enum
> special_kfunc_type. My goal, on the other hand, is to replace many
> scattered func_id == special_kfunc_list[...] comparisons with a
> table-driven approach.
Hi Kaitao,
I appreciate your efforts, however after a quick pass over the changes
you propose (both here and in the new series) with respect to BTF_ID
macros and special_kfuncs_list, I don't understand what problem you're
trying to solve.
The inherent complexity is in the fact that the verifier must know
when a particular BTF id identifies a specific kfunc, or whether it
belongs to some pre-defined set of ids. This is why
special_kfuncs_list and other BTF_ID_SET/LIST-s exist.
And so there is no way around defining those ids and sets *somewhere*,
and so far BTF_ID_* macros did a fine job of that, all things
considered.
AFAICT your changes simply move around the same definitions from
functions with if statements to constant arrays with a runtime search
on them (which is slower by the way). What is the benefit of that vs
the current implementation? We still have to maintain those arrays in
the same way we have to maintain the is_foo_kfunc helpers.
Your newer proposal [1] takes the same idea to the next level, by
introducing an entire new BTF kind, new ELF sections and a bunch of
macros that are no less complicated than the existing ones. And all of that
just moves the same arrays "upstream" to the .BTF_ids section. Again,
I fail to see any benefits to that complexity. Having differentiation
between LIST and SET, and having to mark START and END is not a
problem that needs solving IMO.
The work I was discussing with Alexei [2] is targeted: get rid of
constant enums that mirror existing kfunc symbols by making BTF_ID
internals a bit smarter. And the benefit is clear and simple.
If you could explain what exact issue you're trying to address with
your BTF_ID refactoring patches, maybe we can try converging on a
reasonable approach to it.
Thanks!
[1] https://lore.kernel.org/bpf/20260403170900.58659-1-pilgrimtao@gmail.com/
[2] https://lore.kernel.org/bpf/21e5333c-0b57-46ce-99c8-f6c414270e70@linux.dev/
>
> That said, once Ihor's solution lands, my approach would no longer
> be applicable as-is. So I have been thinking about an alternative,
> please see the patch linked below for details:
> https://lore.kernel.org/bpf/20260403170900.58659-1-pilgrimtao@gmail.com/
>
>> Kaitao Cheng,
>>
>> also please start your part of code reviews.
>> Your patches are not going to be landing if you don't code review.
>>
>> https://lore.kernel.org/bpf/CAADnVQ+TKKptnNB25V3=bcdybh5G6c2DyW2sYtXvyRaVnPN8MA@mail.gmail.com/
>
> I will do my best to carry this forward.
>
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v9 1/9] bpf: refactor kfunc checks using table-driven approach in verifier
2026-04-04 4:49 ` Ihor Solodrai
@ 2026-04-04 10:38 ` Chengkaitao
0 siblings, 0 replies; 16+ messages in thread
From: Chengkaitao @ 2026-04-04 10:38 UTC (permalink / raw)
To: Ihor Solodrai
Cc: Alexei Starovoitov, Martin KaFai Lau, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, Eduard, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, Chengkaitao, open list:KERNEL SELFTEST FRAMEWORK, bpf,
LKML
On Sat, Apr 4, 2026 at 12:49 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>
>
>
> On 4/3/26 10:41 AM, Chengkaitao wrote:
> > On Tue, Mar 31, 2026 at 1:05 AM Alexei Starovoitov
> > <alexei.starovoitov@gmail.com> wrote:
> >>
> >> On Sun, Mar 29, 2026 at 7:05 AM Chengkaitao <pilgrimtao@gmail.com> wrote:
> >>>
> >>> From: Kaitao Cheng <chengkaitao@kylinos.cn>
> >>>
> >>> [...]
> >>> +
> >>> +/* Kfunc family related to spin_lock. */
> >>> +static const enum special_kfunc_type bpf_res_spin_lock_api_kfuncs[] = {
> >>> + KF_bpf_res_spin_lock,
> >>> + KF_bpf_res_spin_unlock,
> >>> + KF_bpf_res_spin_lock_irqsave,
> >>> + KF_bpf_res_spin_unlock_irqrestore,
> >>> +};
> >>
> >> I think it's a step in the wrong direction.
> >> I'd wait for Ihor's BTF_ID_NAMED cleanup.
> >
> > After reading Ihor's messages on the list, if I understand correctly,
> > our two approaches seem to target different problems. What Ihor's
> > work appears to achieve is the ability to remove the entire enum
> > special_kfunc_type. My goal, on the other hand, is to replace many
> > scattered func_id == special_kfunc_list[...] comparisons with a
> > table-driven approach.
>
> Hi Kaitao,
>
> I appreciate your efforts, however after a quick pass over the changes
> you propose (both here and in the new series) with respect to BTF_ID
> macros and special_kfuncs_list, I don't understand what problem you're
> trying to solve.
>
> The inherent complexity is in the fact that the verifier must know
> when a particular BTF id identifies a specific kfunc, or whether it
> belongs to some pre-defined set of ids. This is why
> special_kfuncs_list and other BTF_ID_SET/LIST-s exist.
>
> And so there is no way around defining those ids and sets *somewhere*,
> and so far BTF_ID_* macros did a fine job of that, all things
> considered.
>
> AFAICT your changes simply move around the same definitions from
> functions with if statements to constant arrays with a runtime search
> on them (which is slower by the way). What is the benefit of that vs
> the current implementation? We still have to maintain those arrays in
> the same way we have to maintain the is_foo_kfunc helpers.
>
> Your newer proposal [1] takes the same idea to the next level, by
> introducing an entire new BTF kind, new ELF sections and a bunch of
> macros that are no less complicated than existing. And all of that
> just moves the same arrays "upstream" to the .BTF_ids section. Again,
> I fail to see any benefits to that complexity. Having differentiation
> between LIST and SET, and having to mark START and END is not a
> problem that needs solving IMO.
Your analysis of the code implementation for the new proposal is correct.
Let me elaborate on the purpose behind my approach.
****** Purpose 1 ******
As described in this patch:
https://lore.kernel.org/bpf/20260303135219.33726-4-pilgrimtao@gmail.com/
If we want to add a new kfunc, bpf_list_add_impl, we would today have
to add "btf_id == special_kfunc_list[KF_bpf_list_back]" (or similar)
five times in verifier.c. Under the newer proposal, that is no longer
necessary: defining the kfunc and its verifier metadata in one place
is enough, for example:
__bpf_kfunc int bpf_list_add_impl(struct bpf_list_head *head,
struct bpf_list_node *new,
struct bpf_list_node *prev,
void *meta__ign, u64 off)
{
/* kfunc implementation */
.......
}
BPF_VERIF_KFUNC_DEF(bpf_list_add_impl, list_api, graph_node_api, ... )
If BPF_VERIF_KFUNC_DEF is extended further, BTF_ID(func, bpf_list_add_impl)
and BTF_ID_FLAGS(func, bpf_list_add_impl) might also become unnecessary,
so the snippet above could eventually be close to all the code required
to add a new kfunc.
****** Purpose 2 ******
The kernel no longer needs enum special_kfunc_type to list every KF_bpf_*
entry. That information is folded into the .BTF_ids.##sfx section instead,
so kfunc authors do not have to touch or think about special_kfunc_type.
****** Purpose 3 ******
As described in this patch:
https://lore.kernel.org/bpf/20260303135219.33726-6-pilgrimtao@gmail.com/
In is_bpf_list_api_kfunc(u32 btf_id) there are on the order of eleven
"btf_id == special_kfunc_list[*]" comparisons. As more kfuncs are added,
every is_bpf_* helper will only grow longer and the verifier will get
more repetitive. With the new design, those is_bpf_* helpers can be
removed entirely, including the awkward scattered "btf_id == *" checks.
****** Purpose 4 ******
It pushes us to untangle messy verifier safety cases and make them
modular, so that they can be expressed as parameters to
BPF_VERIF_KFUNC_DEF.
> The work I was discussing with Alexei [2] is targeted: get rid of
> constant enums that mirror existing kfunc symbols by making BTF_ID
> internals a bit smarter. And the benefit is clear and simple.
>
> If you could explain what exact issue you're trying to address with
> your BTF_ID refactoring patches, maybe we can try converging on a
> reasonable approach to it.
>
> Thanks!
>
> [1] https://lore.kernel.org/bpf/20260403170900.58659-1-pilgrimtao@gmail.com/
> [2] https://lore.kernel.org/bpf/21e5333c-0b57-46ce-99c8-f6c414270e70@linux.dev/
>
>
> >
> > That said, once Ihor's solution lands, my approach would no longer
> > be applicable as-is. So I have been thinking about an alternative,
> > please see the patch linked below for details:
> > https://lore.kernel.org/bpf/20260403170900.58659-1-pilgrimtao@gmail.com/
> >
> >> Kaitao Cheng,
> >>
> >> also please start your part of code reviews.
> >> Your patches are not going to be landing if you don't code review.
> >>
> >> https://lore.kernel.org/bpf/CAADnVQ+TKKptnNB25V3=bcdybh5G6c2DyW2sYtXvyRaVnPN8MA@mail.gmail.com/
> >
> > I will do my best to carry this forward.
--
Yours,
Chengkaitao
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH bpf-next v9 2/9] bpf: refactor __bpf_list_del to take list node pointer
2026-03-29 14:04 [PATCH bpf-next v9 0/9] bpf: Extend the bpf_list family of APIs Chengkaitao
2026-03-29 14:04 ` [PATCH bpf-next v9 1/9] bpf: refactor kfunc checks using table-driven approach in verifier Chengkaitao
@ 2026-03-29 14:04 ` Chengkaitao
2026-03-29 14:05 ` [PATCH bpf-next v9 3/9] bpf: clear list node owner and unlink before drop Chengkaitao
` (6 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Chengkaitao @ 2026-03-29 14:04 UTC (permalink / raw)
To: martin.lau, ast, daniel, andrii, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, chengkaitao,
linux-kselftest
Cc: bpf, linux-kernel
From: Kaitao Cheng <chengkaitao@kylinos.cn>
Refactor __bpf_list_del to accept (head, struct list_head *n) instead of
(head, bool tail). The caller now passes the specific node to remove:
bpf_list_pop_front passes h->next, bpf_list_pop_back passes h->prev.
Prepares for introducing bpf_list_del(head, node) kfunc to remove an
arbitrary node when the user holds ownership.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
kernel/bpf/helpers.c | 21 ++++++++++++++-------
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index cb6d242bd093..94fcd4ab39e9 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2426,22 +2426,25 @@ __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
}
-static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
+static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head,
+ struct list_head *n)
{
- struct list_head *n, *h = (void *)head;
+ struct list_head *h = (void *)head;
struct bpf_list_node_kern *node;
/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
* called on its fields, so init here
*/
- if (unlikely(!h->next))
+ if (unlikely(!h->next)) {
INIT_LIST_HEAD(h);
+ return NULL;
+ }
+
if (list_empty(h))
return NULL;
- n = tail ? h->prev : h->next;
node = container_of(n, struct bpf_list_node_kern, list_head);
- if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
+ if (unlikely(READ_ONCE(node->owner) != head))
return NULL;
list_del_init(n);
@@ -2451,12 +2454,16 @@ static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tai
__bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
{
- return __bpf_list_del(head, false);
+ struct list_head *h = (void *)head;
+
+ return __bpf_list_del(head, h->next);
}
__bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
{
- return __bpf_list_del(head, true);
+ struct list_head *h = (void *)head;
+
+ return __bpf_list_del(head, h->prev);
}
__bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head)
--
2.50.1 (Apple Git-155)
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH bpf-next v9 3/9] bpf: clear list node owner and unlink before drop
2026-03-29 14:04 [PATCH bpf-next v9 0/9] bpf: Extend the bpf_list family of APIs Chengkaitao
2026-03-29 14:04 ` [PATCH bpf-next v9 1/9] bpf: refactor kfunc checks using table-driven approach in verifier Chengkaitao
2026-03-29 14:04 ` [PATCH bpf-next v9 2/9] bpf: refactor __bpf_list_del to take list node pointer Chengkaitao
@ 2026-03-29 14:05 ` Chengkaitao
2026-03-29 14:45 ` bot+bpf-ci
2026-03-29 14:05 ` [PATCH bpf-next v9 4/9] bpf: Introduce the bpf_list_del kfunc Chengkaitao
` (5 subsequent siblings)
8 siblings, 1 reply; 16+ messages in thread
From: Chengkaitao @ 2026-03-29 14:05 UTC (permalink / raw)
To: martin.lau, ast, daniel, andrii, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, chengkaitao,
linux-kselftest
Cc: bpf, linux-kernel
From: Kaitao Cheng <chengkaitao@kylinos.cn>
When draining a BPF list_head, clear each node's owner pointer while still
holding the spinlock, so concurrent readers always see a consistent owner.
Unlink each node with list_del_init() before calling __bpf_obj_drop_impl(),
preventing subsequent users who still hold a reference to the node from
following a stale pointer to an invalid next node.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
kernel/bpf/helpers.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 94fcd4ab39e9..8abb99712043 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2232,7 +2232,7 @@ EXPORT_SYMBOL_GPL(bpf_base_func_proto);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock)
{
- struct list_head *head = list_head, *orig_head = list_head;
+ struct list_head *head = list_head, *orig_head = list_head, *pos;
BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
@@ -2247,6 +2247,9 @@ void bpf_list_head_free(const struct btf_field *field, void *list_head,
if (!head->next || list_empty(head))
goto unlock;
head = head->next;
+ /* Clear owner under spinlock to ensure the owner is always valid */
+ for (pos = head; pos != orig_head; pos = pos->next)
+ WRITE_ONCE(container_of(pos, struct bpf_list_node_kern, list_head)->owner, NULL);
unlock:
INIT_LIST_HEAD(orig_head);
__bpf_spin_unlock_irqrestore(spin_lock);
@@ -2255,7 +2258,9 @@ void bpf_list_head_free(const struct btf_field *field, void *list_head,
void *obj = head;
obj -= field->graph_root.node_offset;
+ pos = head;
head = head->next;
+ list_del_init(pos);
/* The contained type can also have resources, including a
* bpf_list_head which needs to be freed.
*/
--
2.50.1 (Apple Git-155)
^ permalink raw reply related [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v9 3/9] bpf: clear list node owner and unlink before drop
2026-03-29 14:05 ` [PATCH bpf-next v9 3/9] bpf: clear list node owner and unlink before drop Chengkaitao
@ 2026-03-29 14:45 ` bot+bpf-ci
0 siblings, 0 replies; 16+ messages in thread
From: bot+bpf-ci @ 2026-03-29 14:45 UTC (permalink / raw)
To: pilgrimtao, martin.lau, ast, daniel, andrii, eddyz87, song,
yonghong.song, john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah,
chengkaitao, linux-kselftest
Cc: bpf, linux-kernel, ast, andrii, daniel, martin.lau, eddyz87,
yonghong.song, clm, ihor.solodrai
[-- Attachment #1: Type: text/plain, Size: 2889 bytes --]
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 94fcd4ab39e9..8abb99712043 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -2247,6 +2247,9 @@ void bpf_list_head_free(const struct btf_field *field, void *list_head,
> if (!head->next || list_empty(head))
> goto unlock;
> head = head->next;
> + /* Clear owner under spinlock to ensure the owner is always valid */
> + for (pos = head; pos != orig_head; pos = pos->next)
> + WRITE_ONCE(container_of(pos, struct bpf_list_node_kern, list_head)->owner, NULL);
> unlock:
> INIT_LIST_HEAD(orig_head);
> __bpf_spin_unlock_irqrestore(spin_lock);
Setting owner to NULL under lock A is good for concurrent readers
(bpf_list_front/back), but could it open a window for concurrent
writers?
After the lock is released, the drain loop below processes each node
without holding any lock. Meanwhile, if a BPF program on another CPU
holds a refcounted owning reference to one of these nodes (obtained
earlier via bpf_refcount_acquire), it can attempt __bpf_list_add to
a different list under a different lock B.
In __bpf_list_add():
if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
...
}
list_add(n, prev);
Before this patch, owner pointed to the old list head (non-NULL), so
the cmpxchg would fail and the add would be rejected. With owner
cleared to NULL, the cmpxchg succeeds, and list_add() modifies the
node's next/prev to point into list B.
> @@ -2255,7 +2258,9 @@ void bpf_list_head_free(const struct btf_field *field, void *list_head,
> void *obj = head;
>
> obj -= field->graph_root.node_offset;
> + pos = head;
> head = head->next;
> + list_del_init(pos);
If the concurrent list_add above has already modified head->next, does
this drain loop then follow into list B's chain? The termination
condition (head != orig_head) would never be satisfied by list B's
nodes, and the loop would call __bpf_obj_drop_impl on list B's nodes
without holding lock B.
Would it be safer to also call list_del_init() under the spinlock
(e.g., splicing into a local list), so that nodes are fully detached
from the chain before the lock is released? The existing comment notes
that draining outside the lock avoids deadlocks from tracing programs,
but the list_del_init operations themselves should be inexpensive
compared to __bpf_obj_drop_impl.
This commit also appears to be a bug fix (addressing stale owner
pointers after drain) but does not include a Fixes: tag. The concurrent
reader issue was introduced when bpf_list_front/back was added.
Suggested tag:
Fixes: fb5b480205ba ("bpf: Add bpf_list_{front,back} kfunc")
---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/23710972607
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH bpf-next v9 4/9] bpf: Introduce the bpf_list_del kfunc.
2026-03-29 14:04 [PATCH bpf-next v9 0/9] bpf: Extend the bpf_list family of APIs Chengkaitao
` (2 preceding siblings ...)
2026-03-29 14:05 ` [PATCH bpf-next v9 3/9] bpf: clear list node owner and unlink before drop Chengkaitao
@ 2026-03-29 14:05 ` Chengkaitao
2026-03-29 14:05 ` [PATCH bpf-next v9 5/9] bpf: refactor __bpf_list_add to take insertion point via **prev_ptr Chengkaitao
` (4 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Chengkaitao @ 2026-03-29 14:05 UTC (permalink / raw)
To: martin.lau, ast, daniel, andrii, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, chengkaitao,
linux-kselftest
Cc: bpf, linux-kernel
From: Kaitao Cheng <chengkaitao@kylinos.cn>
Allow users to remove any node from a linked list.
We have added an additional parameter bpf_list_head *head to
bpf_list_del, as the verifier requires the head parameter to
check whether the lock is being held.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
kernel/bpf/helpers.c | 10 ++++++++++
kernel/bpf/verifier.c | 4 ++++
2 files changed, 14 insertions(+)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 8abb99712043..6dddb2377047 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2471,6 +2471,15 @@ __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
return __bpf_list_del(head, h->prev);
}
+__bpf_kfunc struct bpf_list_node *bpf_list_del(struct bpf_list_head *head,
+ struct bpf_list_node *node)
+{
+ struct bpf_list_node_kern *kn = (void *)node;
+
+ /* verifier to guarantee n is a list node rather than the head */
+ return __bpf_list_del(head, &kn->list_head);
+}
+
__bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head)
{
struct list_head *h = (struct list_head *)head;
@@ -4557,6 +4566,7 @@ BTF_ID_FLAGS(func, bpf_list_push_front_impl)
BTF_ID_FLAGS(func, bpf_list_push_back_impl)
BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_list_del, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f2d9863bb290..ae8d1f2e32de 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12480,6 +12480,7 @@ enum special_kfunc_type {
KF_bpf_list_push_back_impl,
KF_bpf_list_pop_front,
KF_bpf_list_pop_back,
+ KF_bpf_list_del,
KF_bpf_list_front,
KF_bpf_list_back,
KF_bpf_cast_to_kern_ctx,
@@ -12540,6 +12541,7 @@ BTF_ID(func, bpf_list_push_front_impl)
BTF_ID(func, bpf_list_push_back_impl)
BTF_ID(func, bpf_list_pop_front)
BTF_ID(func, bpf_list_pop_back)
+BTF_ID(func, bpf_list_del)
BTF_ID(func, bpf_list_front)
BTF_ID(func, bpf_list_back)
BTF_ID(func, bpf_cast_to_kern_ctx)
@@ -12617,6 +12619,7 @@ static const enum special_kfunc_type bpf_list_api_kfuncs[] = {
KF_bpf_list_push_back_impl,
KF_bpf_list_pop_front,
KF_bpf_list_pop_back,
+ KF_bpf_list_del,
KF_bpf_list_front,
KF_bpf_list_back,
};
@@ -12625,6 +12628,7 @@ static const enum special_kfunc_type bpf_list_api_kfuncs[] = {
static const enum special_kfunc_type bpf_list_node_api_kfuncs[] = {
KF_bpf_list_push_front_impl,
KF_bpf_list_push_back_impl,
+ KF_bpf_list_del,
};
/* Kfuncs that take an rbtree node argument (bpf_rb_node *). */
--
2.50.1 (Apple Git-155)
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH bpf-next v9 5/9] bpf: refactor __bpf_list_add to take insertion point via **prev_ptr
2026-03-29 14:04 [PATCH bpf-next v9 0/9] bpf: Extend the bpf_list family of APIs Chengkaitao
` (3 preceding siblings ...)
2026-03-29 14:05 ` [PATCH bpf-next v9 4/9] bpf: Introduce the bpf_list_del kfunc Chengkaitao
@ 2026-03-29 14:05 ` Chengkaitao
2026-03-29 14:05 ` [PATCH bpf-next v9 6/9] bpf: Add bpf_list_add_impl to insert node after a given list node Chengkaitao
` (3 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Chengkaitao @ 2026-03-29 14:05 UTC (permalink / raw)
To: martin.lau, ast, daniel, andrii, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, chengkaitao,
linux-kselftest
Cc: bpf, linux-kernel
From: Kaitao Cheng <chengkaitao@kylinos.cn>
Refactor __bpf_list_add to accept (node, head, struct list_head **prev_ptr,
..) instead of (node, head, bool tail, ..). Load prev from *prev_ptr after
INIT_LIST_HEAD(h), so we never dereference an uninitialized h->prev when
head was 0-initialized (e.g. push_back passes &h->prev).
When prev is not the list head, validate that prev is in the list via
its owner.
Prepares for bpf_list_add_impl(head, new, prev, ..) to insert after a
given list node.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
kernel/bpf/helpers.c | 36 ++++++++++++++++++++++++++----------
1 file changed, 26 insertions(+), 10 deletions(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 6dddb2377047..669e380746a6 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2386,9 +2386,11 @@ __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta
static int __bpf_list_add(struct bpf_list_node_kern *node,
struct bpf_list_head *head,
- bool tail, struct btf_record *rec, u64 off)
+ struct list_head **prev_ptr,
+ struct btf_record *rec, u64 off)
{
struct list_head *n = &node->list_head, *h = (void *)head;
+ struct list_head *prev;
/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
* called on its fields, so init here
@@ -2396,19 +2398,31 @@ static int __bpf_list_add(struct bpf_list_node_kern *node,
if (unlikely(!h->next))
INIT_LIST_HEAD(h);
+ prev = *prev_ptr;
+
+ /* When prev is not the list head, it must be a node in this list. */
+ if (prev != h) {
+ struct bpf_list_node_kern *prev_kn =
+ container_of(prev, struct bpf_list_node_kern, list_head);
+
+ if (unlikely(READ_ONCE(prev_kn->owner) != head))
+ goto fail;
+ }
+
/* node->owner != NULL implies !list_empty(n), no need to separately
* check the latter
*/
- if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
- /* Only called from BPF prog, no need to migrate_disable */
- __bpf_obj_drop_impl((void *)n - off, rec, false);
- return -EINVAL;
- }
+ if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON))
+ goto fail;
- tail ? list_add_tail(n, h) : list_add(n, h);
+ list_add(n, prev);
WRITE_ONCE(node->owner, head);
-
return 0;
+
+fail:
+ /* Only called from BPF prog, no need to migrate_disable */
+ __bpf_obj_drop_impl((void *)n - off, rec, false);
+ return -EINVAL;
}
__bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
@@ -2417,8 +2431,9 @@ __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
{
struct bpf_list_node_kern *n = (void *)node;
struct btf_struct_meta *meta = meta__ign;
+ struct list_head *h = (void *)head;
- return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
+ return __bpf_list_add(n, head, &h, meta ? meta->record : NULL, off);
}
__bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
@@ -2427,8 +2442,9 @@ __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
{
struct bpf_list_node_kern *n = (void *)node;
struct btf_struct_meta *meta = meta__ign;
+ struct list_head *h = (void *)head;
- return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
+ return __bpf_list_add(n, head, &h->prev, meta ? meta->record : NULL, off);
}
static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head,
--
2.50.1 (Apple Git-155)
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH bpf-next v9 6/9] bpf: Add bpf_list_add_impl to insert node after a given list node
2026-03-29 14:04 [PATCH bpf-next v9 0/9] bpf: Extend the bpf_list family of APIs Chengkaitao
` (4 preceding siblings ...)
2026-03-29 14:05 ` [PATCH bpf-next v9 5/9] bpf: refactor __bpf_list_add to take insertion point via **prev_ptr Chengkaitao
@ 2026-03-29 14:05 ` Chengkaitao
2026-03-29 14:05 ` [PATCH bpf-next v9 7/9] bpf: allow bpf_list_front/back result as the prev argument of bpf_list_add_impl Chengkaitao
` (2 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Chengkaitao @ 2026-03-29 14:05 UTC (permalink / raw)
To: martin.lau, ast, daniel, andrii, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, chengkaitao,
linux-kselftest
Cc: bpf, linux-kernel
From: Kaitao Cheng <chengkaitao@kylinos.cn>
Add a new kfunc bpf_list_add_impl(head, new, prev, meta, off) that
inserts 'new' after 'prev' in the BPF linked list. Both must be in
the same list; 'prev' must already be in the list. The new node must
be an owning reference (e.g. from bpf_obj_new); the kfunc consumes
that reference and the node becomes non-owning once inserted.
We have added an additional parameter bpf_list_head *head to
bpf_list_add_impl, as the verifier requires the head parameter to
check whether the lock is being held.
Returns 0 on success, -EINVAL if 'prev' is not in a list or 'new'
is already in a list (or duplicate insertion). On failure, the
kernel drops the passed-in node.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
kernel/bpf/helpers.c | 14 ++++++++++++++
kernel/bpf/verifier.c | 12 ++++++++++--
2 files changed, 24 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 669e380746a6..68c83a009275 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2447,6 +2447,19 @@ __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
return __bpf_list_add(n, head, &h->prev, meta ? meta->record : NULL, off);
}
+__bpf_kfunc int bpf_list_add_impl(struct bpf_list_head *head,
+ struct bpf_list_node *new,
+ struct bpf_list_node *prev,
+ void *meta__ign, u64 off)
+{
+ struct bpf_list_node_kern *n = (void *)new, *p = (void *)prev;
+ struct btf_struct_meta *meta = meta__ign;
+ struct list_head *prev_ptr = &p->list_head;
+
+ return __bpf_list_add(n, head, &prev_ptr,
+ meta ? meta->record : NULL, off);
+}
+
static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head,
struct list_head *n)
{
@@ -4585,6 +4598,7 @@ BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_del, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_list_add_impl)
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index ae8d1f2e32de..69dcf0105973 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12478,6 +12478,7 @@ enum special_kfunc_type {
KF_bpf_refcount_acquire_impl,
KF_bpf_list_push_front_impl,
KF_bpf_list_push_back_impl,
+ KF_bpf_list_add_impl,
KF_bpf_list_pop_front,
KF_bpf_list_pop_back,
KF_bpf_list_del,
@@ -12539,6 +12540,7 @@ BTF_ID(func, bpf_obj_drop_impl)
BTF_ID(func, bpf_refcount_acquire_impl)
BTF_ID(func, bpf_list_push_front_impl)
BTF_ID(func, bpf_list_push_back_impl)
+BTF_ID(func, bpf_list_add_impl)
BTF_ID(func, bpf_list_pop_front)
BTF_ID(func, bpf_list_pop_back)
BTF_ID(func, bpf_list_del)
@@ -12617,6 +12619,7 @@ BTF_ID(func, bpf_stream_print_stack)
static const enum special_kfunc_type bpf_list_api_kfuncs[] = {
KF_bpf_list_push_front_impl,
KF_bpf_list_push_back_impl,
+ KF_bpf_list_add_impl,
KF_bpf_list_pop_front,
KF_bpf_list_pop_back,
KF_bpf_list_del,
@@ -12628,6 +12631,7 @@ static const enum special_kfunc_type bpf_list_api_kfuncs[] = {
static const enum special_kfunc_type bpf_list_node_api_kfuncs[] = {
KF_bpf_list_push_front_impl,
KF_bpf_list_push_back_impl,
+ KF_bpf_list_add_impl,
KF_bpf_list_del,
};
@@ -12681,6 +12685,7 @@ static const enum special_kfunc_type bpf_stream_api_kfuncs[] = {
static const enum special_kfunc_type bpf_collection_insert_kfuncs[] = {
KF_bpf_list_push_front_impl,
KF_bpf_list_push_back_impl,
+ KF_bpf_list_add_impl,
KF_bpf_rbtree_add_impl,
};
@@ -23354,8 +23359,11 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int struct_meta_reg = BPF_REG_3;
int node_offset_reg = BPF_REG_4;
- /* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
- if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+ /* list/rbtree_add_impl have an extra arg (prev/less),
+ * so args-to-fixup are in different regs.
+ */
+ if (desc->func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
+ desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
struct_meta_reg = BPF_REG_4;
node_offset_reg = BPF_REG_5;
}
--
2.50.1 (Apple Git-155)
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH bpf-next v9 7/9] bpf: allow bpf_list_front/back result as the prev argument of bpf_list_add_impl
2026-03-29 14:04 [PATCH bpf-next v9 0/9] bpf: Extend the bpf_list family of APIs Chengkaitao
` (5 preceding siblings ...)
2026-03-29 14:05 ` [PATCH bpf-next v9 6/9] bpf: Add bpf_list_add_impl to insert node after a given list node Chengkaitao
@ 2026-03-29 14:05 ` Chengkaitao
2026-03-29 14:05 ` [PATCH bpf-next v9 8/9] bpf: add bpf_list_is_first/last/empty kfuncs Chengkaitao
2026-03-29 14:05 ` [PATCH bpf-next v9 9/9] selftests/bpf: Add test cases for bpf_list_del/add/is_first/is_last/empty Chengkaitao
8 siblings, 0 replies; 16+ messages in thread
From: Chengkaitao @ 2026-03-29 14:05 UTC (permalink / raw)
To: martin.lau, ast, daniel, andrii, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, chengkaitao,
linux-kselftest
Cc: bpf, linux-kernel
From: Kaitao Cheng <chengkaitao@kylinos.cn>
KF_ARG_PTR_TO_LIST_NODE normally requires an owning reference
(PTR_TO_BTF_ID | MEM_ALLOC and ref_obj_id). For bpf_list_add_impl's
third argument (prev), allow a non-owning reference with ref_obj_id==0
so that the result of bpf_list_front() or bpf_list_back() can be passed
as the insertion point. When prev is such a non-owning ref, skip the
MEM_ALLOC/ref_obj_id checks and jump to the shared list-node processing.
Owning refs (e.g. from pop + refcount_acquire) still pass the existing
checks and reach the same label.
Add BTF suffix __nonown_allowed (is_kfunc_arg_nonown_allowed) and
document it under kfuncs.rst.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
Documentation/bpf/kfuncs.rst | 20 +++++++++++++++++++-
kernel/bpf/helpers.c | 4 ++--
kernel/bpf/verifier.c | 13 +++++++++++++
3 files changed, 34 insertions(+), 3 deletions(-)
diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst
index 75e6c078e0e7..6760c547dd32 100644
--- a/Documentation/bpf/kfuncs.rst
+++ b/Documentation/bpf/kfuncs.rst
@@ -207,7 +207,25 @@ Here, the buffer may be NULL. If the buffer is not NULL, it must be at least
buffer__szk bytes in size. The kfunc is responsible for checking if the buffer
is NULL before using it.
-2.3.5 __str Annotation
+2.3.5 __nonown_allowed Annotation
+----------------------------------
+
+This annotation is used to indicate that the parameter may be a non-owning reference.
+
+An example is given below::
+
+ __bpf_kfunc int bpf_list_add_impl(..., struct bpf_list_node
+ *prev__nonown_allowed, ...)
+ {
+ ...
+ }
+
+For the ``prev__nonown_allowed`` parameter (resolved as ``KF_ARG_PTR_TO_LIST_NODE``),
+suffix ``__nonown_allowed`` retains the usual owning-pointer rules and also
+permits a non-owning reference with no ref_obj_id (e.g. the return value of
+bpf_list_front() / bpf_list_back()).
+
+2.3.6 __str Annotation
----------------------------
This annotation is used to indicate that the argument is a constant string.
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 68c83a009275..743341aae5c0 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2449,10 +2449,10 @@ __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
__bpf_kfunc int bpf_list_add_impl(struct bpf_list_head *head,
struct bpf_list_node *new,
- struct bpf_list_node *prev,
+ struct bpf_list_node *prev__nonown_allowed,
void *meta__ign, u64 off)
{
- struct bpf_list_node_kern *n = (void *)new, *p = (void *)prev;
+ struct bpf_list_node_kern *n = (void *)new, *p = (void *)prev__nonown_allowed;
struct btf_struct_meta *meta = meta__ign;
struct list_head *prev_ptr = &p->list_head;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 69dcf0105973..514a0aab93b8 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12265,6 +12265,11 @@ static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param
return btf_param_match_suffix(btf, arg, "__nullable");
}
+static bool is_kfunc_arg_nonown_allowed(const struct btf *btf, const struct btf_param *arg)
+{
+ return btf_param_match_suffix(btf, arg, "__nonown_allowed");
+}
+
static bool is_kfunc_arg_const_str(const struct btf *btf, const struct btf_param *arg)
{
return btf_param_match_suffix(btf, arg, "__str");
@@ -13736,6 +13741,13 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return ret;
break;
case KF_ARG_PTR_TO_LIST_NODE:
+ if (is_kfunc_arg_nonown_allowed(btf, &args[i]) &&
+ type_is_non_owning_ref(reg->type) && !reg->ref_obj_id) {
+ /* Allow bpf_list_front/back return value as
+ * list_add_impl's third arg (R3).
+ */
+ goto check_ok;
+ }
if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
verbose(env, "arg#%d expected pointer to allocated object\n", i);
return -EINVAL;
@@ -13744,6 +13756,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
verbose(env, "allocated object must be referenced\n");
return -EINVAL;
}
+check_ok:
ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta);
if (ret < 0)
return ret;
--
2.50.1 (Apple Git-155)
^ permalink raw reply related [flat|nested] 16+ messages in thread

* [PATCH bpf-next v9 8/9] bpf: add bpf_list_is_first/last/empty kfuncs
2026-03-29 14:04 [PATCH bpf-next v9 0/9] bpf: Extend the bpf_list family of APIs Chengkaitao
` (6 preceding siblings ...)
2026-03-29 14:05 ` [PATCH bpf-next v9 7/9] bpf: allow bpf_list_front/back result as the prev argument of bpf_list_add_impl Chengkaitao
@ 2026-03-29 14:05 ` Chengkaitao
2026-03-29 14:05 ` [PATCH bpf-next v9 9/9] selftests/bpf: Add test cases for bpf_list_del/add/is_first/is_last/empty Chengkaitao
8 siblings, 0 replies; 16+ messages in thread
From: Chengkaitao @ 2026-03-29 14:05 UTC (permalink / raw)
To: martin.lau, ast, daniel, andrii, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, chengkaitao,
linux-kselftest
Cc: bpf, linux-kernel, Emil Tsalapatis
From: Kaitao Cheng <chengkaitao@kylinos.cn>
Add three kfuncs for BPF linked list queries:
- bpf_list_is_first(head, node): true if node is the first in the list.
- bpf_list_is_last(head, node): true if node is the last in the list.
- bpf_list_empty(head): true if the list has no entries.
Currently, without these kfuncs, to implement the above functionality
it is necessary to first call bpf_list_pop_front/back to retrieve the
first or last node before checking whether the passed-in node was the
first or last one. After the check, the node had to be pushed back into
the list using bpf_list_push_front/back, which was very inefficient.
Now, with the bpf_list_is_first/last/empty kfuncs, we can directly
check whether a node is the first, last, or whether the list is empty,
without having to first retrieve the node.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
---
kernel/bpf/helpers.c | 38 ++++++++++++++++++++++++++++++++++++++
kernel/bpf/verifier.c | 11 +++++++++++
2 files changed, 49 insertions(+)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 743341aae5c0..5f778da2e560 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2529,6 +2529,41 @@ __bpf_kfunc struct bpf_list_node *bpf_list_back(struct bpf_list_head *head)
return (struct bpf_list_node *)h->prev;
}
+__bpf_kfunc bool bpf_list_is_first(struct bpf_list_head *head, struct bpf_list_node *node)
+{
+ struct list_head *h = (struct list_head *)head;
+ struct bpf_list_node_kern *kn = (struct bpf_list_node_kern *)node;
+
+ if (READ_ONCE(kn->owner) != head)
+ return false;
+
+ return list_is_first(&kn->list_head, h);
+}
+
+__bpf_kfunc bool bpf_list_is_last(struct bpf_list_head *head, struct bpf_list_node *node)
+{
+ struct list_head *h = (struct list_head *)head;
+ struct bpf_list_node_kern *kn = (struct bpf_list_node_kern *)node;
+
+ if (READ_ONCE(kn->owner) != head)
+ return false;
+
+ return list_is_last(&kn->list_head, h);
+}
+
+__bpf_kfunc bool bpf_list_empty(struct bpf_list_head *head)
+{
+ struct list_head *h = (struct list_head *)head;
+
+ /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
+ * called on its fields, so init here
+ */
+ if (unlikely(!h->next))
+ INIT_LIST_HEAD(h);
+
+ return list_empty(h);
+}
+
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
struct bpf_rb_node *node)
{
@@ -4599,6 +4634,9 @@ BTF_ID_FLAGS(func, bpf_list_del, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_add_impl)
+BTF_ID_FLAGS(func, bpf_list_is_first)
+BTF_ID_FLAGS(func, bpf_list_is_last)
+BTF_ID_FLAGS(func, bpf_list_empty)
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 514a0aab93b8..33fae482cd56 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12489,6 +12489,9 @@ enum special_kfunc_type {
KF_bpf_list_del,
KF_bpf_list_front,
KF_bpf_list_back,
+ KF_bpf_list_is_first,
+ KF_bpf_list_is_last,
+ KF_bpf_list_empty,
KF_bpf_cast_to_kern_ctx,
KF_bpf_rdonly_cast,
KF_bpf_rcu_read_lock,
@@ -12551,6 +12554,9 @@ BTF_ID(func, bpf_list_pop_back)
BTF_ID(func, bpf_list_del)
BTF_ID(func, bpf_list_front)
BTF_ID(func, bpf_list_back)
+BTF_ID(func, bpf_list_is_first)
+BTF_ID(func, bpf_list_is_last)
+BTF_ID(func, bpf_list_empty)
BTF_ID(func, bpf_cast_to_kern_ctx)
BTF_ID(func, bpf_rdonly_cast)
BTF_ID(func, bpf_rcu_read_lock)
@@ -12630,6 +12636,9 @@ static const enum special_kfunc_type bpf_list_api_kfuncs[] = {
KF_bpf_list_del,
KF_bpf_list_front,
KF_bpf_list_back,
+ KF_bpf_list_is_first,
+ KF_bpf_list_is_last,
+ KF_bpf_list_empty,
};
/* Kfuncs that take a list node argument (bpf_list_node *). */
@@ -12638,6 +12647,8 @@ static const enum special_kfunc_type bpf_list_node_api_kfuncs[] = {
KF_bpf_list_push_back_impl,
KF_bpf_list_add_impl,
KF_bpf_list_del,
+ KF_bpf_list_is_first,
+ KF_bpf_list_is_last,
};
/* Kfuncs that take an rbtree node argument (bpf_rb_node *). */
--
2.50.1 (Apple Git-155)
^ permalink raw reply related [flat|nested] 16+ messages in thread

* [PATCH bpf-next v9 9/9] selftests/bpf: Add test cases for bpf_list_del/add/is_first/is_last/empty
2026-03-29 14:04 [PATCH bpf-next v9 0/9] bpf: Extend the bpf_list family of APIs Chengkaitao
` (7 preceding siblings ...)
2026-03-29 14:05 ` [PATCH bpf-next v9 8/9] bpf: add bpf_list_is_first/last/empty kfuncs Chengkaitao
@ 2026-03-29 14:05 ` Chengkaitao
8 siblings, 0 replies; 16+ messages in thread
From: Chengkaitao @ 2026-03-29 14:05 UTC (permalink / raw)
To: martin.lau, ast, daniel, andrii, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, chengkaitao,
linux-kselftest
Cc: bpf, linux-kernel
From: Kaitao Cheng <chengkaitao@kylinos.cn>
Extend refcounted_kptr with tests for bpf_list_add (including prev from
bpf_list_front and bpf_refcount_acquire), bpf_list_del (including node
from bpf_rbtree_remove and bpf_refcount_acquire), bpf_list_empty,
bpf_list_is_first/last, and push_back on uninit head.
To verify the validity of bpf_list_del/add, the test also expects the
verifier to reject calls to bpf_list_del/add made without holding the
spin_lock.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
.../testing/selftests/bpf/bpf_experimental.h | 16 +
.../selftests/bpf/progs/refcounted_kptr.c | 311 ++++++++++++++++++
2 files changed, 327 insertions(+)
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 44466acf8083..5821f0000e1f 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -85,6 +85,22 @@ extern int bpf_list_push_back_impl(struct bpf_list_head *head,
/* Convenience macro to wrap over bpf_list_push_back_impl */
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)
+/* Description
+ * Insert 'new' after 'prev' in the BPF linked list with head 'head'.
+ * The bpf_spin_lock protecting the list must be held. 'prev' must already
+ * be in that list; 'new' must not be in any list. The 'meta' and 'off'
+ * parameters are rewritten by the verifier, no need for BPF programs to
+ * set them.
+ * Returns
+ * 0 on success, or -EINVAL if head is NULL, if prev is not in the list
+ * headed by 'head', or if new is already linked into a list.
+ */
+extern int bpf_list_add_impl(struct bpf_list_head *head, struct bpf_list_node *new,
+ struct bpf_list_node *prev, void *meta, __u64 off) __ksym;
+
+/* Convenience macro to wrap over bpf_list_add_impl */
+#define bpf_list_add(head, new, prev) bpf_list_add_impl(head, new, prev, NULL, 0)
+
/* Description
* Remove the entry at the beginning of the BPF linked list.
* Returns
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index c847398837cc..e5558994a76d 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -367,6 +367,317 @@ long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \
INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");
+SEC("tc")
+__description("list_empty_test: list empty before add, non-empty after add")
+__success __retval(0)
+int list_empty_test(void *ctx)
+{
+ struct node_data *node_new;
+
+ bpf_spin_lock(&lock);
+ if (!bpf_list_empty(&head)) {
+ bpf_spin_unlock(&lock);
+ return -1;
+ }
+ bpf_spin_unlock(&lock);
+
+ node_new = bpf_obj_new(typeof(*node_new));
+ if (!node_new)
+ return -2;
+
+ bpf_spin_lock(&lock);
+ bpf_list_push_front(&head, &node_new->l);
+
+ if (bpf_list_empty(&head)) {
+ bpf_spin_unlock(&lock);
+ return -3;
+ }
+ bpf_spin_unlock(&lock);
+ return 0;
+}
+
+static struct node_data *__add_in_list(struct bpf_list_head *head,
+ struct bpf_spin_lock *lock)
+{
+ struct node_data *node_new, *node_ref;
+
+ node_new = bpf_obj_new(typeof(*node_new));
+ if (!node_new)
+ return NULL;
+
+ node_ref = bpf_refcount_acquire(node_new);
+
+ bpf_spin_lock(lock);
+ bpf_list_push_front(head, &node_new->l);
+ bpf_spin_unlock(lock);
+ return node_ref;
+}
+
+SEC("tc")
+__description("list_is_edge_test: is_first on first node, is_last on last node")
+__success __retval(0)
+int list_is_edge_test(void *ctx)
+{
+ struct node_data *node_first, *node_last;
+ int err = 0;
+
+ node_last = __add_in_list(&head, &lock);
+ if (!node_last)
+ return -1;
+
+ node_first = __add_in_list(&head, &lock);
+ if (!node_first) {
+ bpf_obj_drop(node_last);
+ return -2;
+ }
+
+ bpf_spin_lock(&lock);
+ if (!bpf_list_is_first(&head, &node_first->l)) {
+ err = -3;
+ goto fail;
+ }
+ if (!bpf_list_is_last(&head, &node_last->l))
+ err = -4;
+
+fail:
+ bpf_spin_unlock(&lock);
+ bpf_obj_drop(node_first);
+ bpf_obj_drop(node_last);
+ return err;
+}
+
+SEC("tc")
+__description("list_del_test1: del returns removed nodes")
+__success __retval(0)
+int list_del_test1(void *ctx)
+{
+ struct node_data *node_first, *node_last;
+ struct bpf_list_node *bpf_node_first, *bpf_node_last;
+ int err = 0;
+
+ node_last = __add_in_list(&head, &lock);
+ if (!node_last)
+ return -1;
+
+ node_first = __add_in_list(&head, &lock);
+ if (!node_first) {
+ bpf_obj_drop(node_last);
+ return -2;
+ }
+
+ bpf_spin_lock(&lock);
+ bpf_node_last = bpf_list_del(&head, &node_last->l);
+ bpf_node_first = bpf_list_del(&head, &node_first->l);
+ bpf_spin_unlock(&lock);
+
+ if (bpf_node_first)
+ bpf_obj_drop(container_of(bpf_node_first, struct node_data, l));
+ else
+ err = -3;
+
+ if (bpf_node_last)
+ bpf_obj_drop(container_of(bpf_node_last, struct node_data, l));
+ else
+ err = -4;
+
+ bpf_obj_drop(node_first);
+ bpf_obj_drop(node_last);
+ return err;
+}
+
+SEC("tc")
+__description("list_del_test2: remove an arbitrary node from the list")
+__success __retval(0)
+int list_del_test2(void *ctx)
+{
+ struct bpf_rb_node *rb;
+ struct bpf_list_node *l;
+ struct node_data *n;
+ long err;
+
+ err = __insert_in_tree_and_list(&head, &root, &lock);
+ if (err)
+ return err;
+
+ bpf_spin_lock(&lock);
+ rb = bpf_rbtree_first(&root);
+ if (!rb) {
+ bpf_spin_unlock(&lock);
+ return -4;
+ }
+
+ rb = bpf_rbtree_remove(&root, rb);
+ if (!rb) {
+ bpf_spin_unlock(&lock);
+ return -5;
+ }
+
+ n = container_of(rb, struct node_data, r);
+ l = bpf_list_del(&head, &n->l);
+ bpf_spin_unlock(&lock);
+ bpf_obj_drop(n);
+ if (!l)
+ return -6;
+
+ bpf_obj_drop(container_of(l, struct node_data, l));
+ return 0;
+}
+
+SEC("tc")
+__description("list_add_test1: insert new node after prev")
+__success __retval(0)
+int list_add_test1(void *ctx)
+{
+ struct node_data *node_first;
+ struct node_data *new_node;
+ long err = 0;
+
+ node_first = __add_in_list(&head, &lock);
+ if (!node_first)
+ return -1;
+
+ new_node = bpf_obj_new(typeof(*new_node));
+ if (!new_node) {
+ err = -2;
+ goto fail;
+ }
+
+ bpf_spin_lock(&lock);
+ err = bpf_list_add(&head, &new_node->l, &node_first->l);
+ bpf_spin_unlock(&lock);
+ if (err) {
+ err = -3;
+ goto fail;
+ }
+
+fail:
+ bpf_obj_drop(node_first);
+ return err;
+}
+
+SEC("tc")
+__description("list_add_test2: list_add accepts list_front return value as prev")
+__success __retval(0)
+int list_add_test2(void *ctx)
+{
+ struct node_data *new_node, *tmp;
+ struct bpf_list_node *bpf_node;
+ long err = 0;
+
+ tmp = __add_in_list(&head, &lock);
+ if (!tmp)
+ return -1;
+
+ new_node = bpf_obj_new(typeof(*new_node));
+ if (!new_node) {
+ err = -2;
+ goto fail;
+ }
+
+ bpf_spin_lock(&lock);
+ bpf_node = bpf_list_front(&head);
+ if (!bpf_node) {
+ bpf_spin_unlock(&lock);
+ bpf_obj_drop(new_node);
+ err = -3;
+ goto fail;
+ }
+
+ err = bpf_list_add(&head, &new_node->l, bpf_node);
+ bpf_spin_unlock(&lock);
+ if (err) {
+ err = -4;
+ goto fail;
+ }
+
+fail:
+ bpf_obj_drop(tmp);
+ return err;
+}
+
+struct uninit_head_val {
+ struct bpf_spin_lock lock;
+ struct bpf_list_head head __contains(node_data, l);
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct uninit_head_val);
+ __uint(max_entries, 1);
+} uninit_head_map SEC(".maps");
+
+SEC("tc")
+__description("list_push_back_uninit_head: push_back on 0-initialized list head")
+__success __retval(0)
+int list_push_back_uninit_head(void *ctx)
+{
+ struct uninit_head_val *st;
+ struct node_data *node;
+ int ret = -1, key = 0;
+
+ st = bpf_map_lookup_elem(&uninit_head_map, &key);
+ if (!st)
+ return -1;
+
+ node = bpf_obj_new(typeof(*node));
+ if (!node)
+ return -1;
+
+ bpf_spin_lock(&st->lock);
+ ret = bpf_list_push_back(&st->head, &node->l);
+ bpf_spin_unlock(&st->lock);
+
+ return ret;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=32 must be held for bpf_list_head")
+long list_del_without_lock_fail(void *ctx)
+{
+ struct bpf_rb_node *rb;
+ struct bpf_list_node *l;
+ struct node_data *n;
+
+ bpf_spin_lock(&lock);
+ rb = bpf_rbtree_first(&root);
+ bpf_spin_unlock(&lock);
+ if (!rb)
+ return -1;
+
+ n = container_of(rb, struct node_data, r);
+ /* Error case: delete list node without holding lock */
+ l = bpf_list_del(&head, &n->l);
+ if (!l)
+ return -2;
+ bpf_obj_drop(container_of(l, struct node_data, l));
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=32 must be held for bpf_list_head")
+long list_add_without_lock_fail(void *ctx)
+{
+ struct bpf_rb_node *rb;
+ struct bpf_list_node *l;
+ struct node_data *n;
+
+ bpf_spin_lock(&lock);
+ rb = bpf_rbtree_first(&root);
+ l = bpf_list_front(&head);
+ bpf_spin_unlock(&lock);
+ if (!rb || !l)
+ return -1;
+
+ n = container_of(l, struct node_data, l);
+ /* Error case: add list node without holding lock */
+ if (bpf_list_add(&head, &n->l, l))
+ return -2;
+
+ return 0;
+}
+
SEC("tc")
__success
long rbtree_refcounted_node_ref_escapes(void *ctx)
--
2.50.1 (Apple Git-155)
^ permalink raw reply related [flat|nested] 16+ messages in thread