* [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS
@ 2026-03-18 23:42 Ihor Solodrai
2026-03-18 23:42 ` [PATCH bpf-next v3 2/2] selftests/bpf: Update kfuncs using btf_struct_meta to new variants Ihor Solodrai
` (3 more replies)
0 siblings, 4 replies; 21+ messages in thread
From: Ihor Solodrai @ 2026-03-18 23:42 UTC (permalink / raw)
To: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann,
Eduard Zingerman
Cc: bpf, kernel-team
The following kfuncs currently accept void *meta__ign argument:
* bpf_obj_new_impl
* bpf_obj_drop_impl
* bpf_percpu_obj_new_impl
* bpf_percpu_obj_drop_impl
* bpf_refcount_acquire_impl
* bpf_list_push_front_impl
* bpf_rbtree_add_impl
The __ign suffix is an indicator for the verifier to skip the argument
in check_kfunc_args(). Then, in fixup_kfunc_call() the verifier may
set the value of this argument to struct btf_struct_meta *
kptr_struct_meta from insn_aux_data.
BPF programs must pass a dummy NULL value when calling these kfuncs.
Additionally, the list and rbtree _impl kfuncs also accept an implicit
u64 argument, which doesn't require __ign suffix because it's a
scalar, and BPF programs explicitly pass 0.
Add new kfuncs with KF_IMPLICIT_ARGS [1], that correspond to each
_impl kfunc accepting meta__ign. The existing _impl kfuncs remain
unchanged for backwards compatibility.
To support this, add "btf_struct_meta" to the list of recognized
implicit argument types in resolve_btfids.
Implement is_kfunc_arg_implicit() in the verifier, which determines
implicit args by inspecting both (_impl and non-_impl) BTF prototypes
of the kfunc.
Update the special_kfunc_list in the verifier and relevant checks to
support both the old _impl and the new KF_IMPLICIT_ARGS variants of
btf_struct_meta users.
[1] https://lore.kernel.org/bpf/20260120222638.3976562-1-ihor.solodrai@linux.dev/
Signed-off-by: Ihor Solodrai <ihor.solodrai@linux.dev>
---
v1->v3: Nits suggested by AI
v1: https://lore.kernel.org/bpf/20260312193546.192786-1-ihor.solodrai@linux.dev/
---
kernel/bpf/helpers.c | 93 +++++++--
kernel/bpf/verifier.c | 184 +++++++++++++-----
tools/bpf/resolve_btfids/main.c | 1 +
.../selftests/bpf/progs/percpu_alloc_fail.c | 4 +-
4 files changed, 206 insertions(+), 76 deletions(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index cb6d242bd093..fc4d537c0b15 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2302,9 +2302,8 @@ void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
__bpf_kfunc_start_defs();
-__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
+__bpf_kfunc void *bpf_obj_new(u64 local_type_id__k, struct btf_struct_meta *meta)
{
- struct btf_struct_meta *meta = meta__ign;
u64 size = local_type_id__k;
void *p;
@@ -2313,17 +2312,28 @@ __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
return NULL;
if (meta)
bpf_obj_init(meta->record, p);
+
return p;
}
-__bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
+__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
+{
+ return bpf_obj_new(local_type_id__k, meta__ign);
+}
+
+__bpf_kfunc void *bpf_percpu_obj_new(u64 local_type_id__k, struct btf_struct_meta *meta)
{
u64 size = local_type_id__k;
- /* The verifier has ensured that meta__ign must be NULL */
+ /* The verifier has ensured that meta must be NULL */
return bpf_mem_alloc(&bpf_global_percpu_ma, size);
}
+__bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
+{
+ return bpf_percpu_obj_new(local_type_id__k, meta__ign);
+}
+
/* Must be called under migrate_disable(), as required by bpf_mem_free */
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
{
@@ -2347,23 +2357,31 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
bpf_mem_free_rcu(ma, p);
}
-__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
+__bpf_kfunc void bpf_obj_drop(void *p__alloc, struct btf_struct_meta *meta)
{
- struct btf_struct_meta *meta = meta__ign;
void *p = p__alloc;
__bpf_obj_drop_impl(p, meta ? meta->record : NULL, false);
}
-__bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
+__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
+{
+ return bpf_obj_drop(p__alloc, meta__ign);
+}
+
+__bpf_kfunc void bpf_percpu_obj_drop(void *p__alloc, struct btf_struct_meta *meta)
{
- /* The verifier has ensured that meta__ign must be NULL */
+ /* The verifier has ensured that meta must be NULL */
bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
}
-__bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
+__bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
+{
+ bpf_percpu_obj_drop(p__alloc, meta__ign);
+}
+
+__bpf_kfunc void *bpf_refcount_acquire(void *p__refcounted_kptr, struct btf_struct_meta *meta)
{
- struct btf_struct_meta *meta = meta__ign;
struct bpf_refcount *ref;
/* Could just cast directly to refcount_t *, but need some code using
@@ -2379,6 +2397,11 @@ __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta
return (void *)p__refcounted_kptr;
}
+__bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
+{
+ return bpf_refcount_acquire(p__refcounted_kptr, meta__ign);
+}
+
static int __bpf_list_add(struct bpf_list_node_kern *node,
struct bpf_list_head *head,
bool tail, struct btf_record *rec, u64 off)
@@ -2406,24 +2429,38 @@ static int __bpf_list_add(struct bpf_list_node_kern *node,
return 0;
}
+__bpf_kfunc int bpf_list_push_front(struct bpf_list_head *head,
+ struct bpf_list_node *node,
+ struct btf_struct_meta *meta,
+ u64 off)
+{
+ struct bpf_list_node_kern *n = (void *)node;
+
+ return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
+}
+
__bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
struct bpf_list_node *node,
void *meta__ign, u64 off)
+{
+ return bpf_list_push_front(head, node, meta__ign, off);
+}
+
+__bpf_kfunc int bpf_list_push_back(struct bpf_list_head *head,
+ struct bpf_list_node *node,
+ struct btf_struct_meta *meta,
+ u64 off)
{
struct bpf_list_node_kern *n = (void *)node;
- struct btf_struct_meta *meta = meta__ign;
- return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
+ return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
}
__bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
struct bpf_list_node *node,
void *meta__ign, u64 off)
{
- struct bpf_list_node_kern *n = (void *)node;
- struct btf_struct_meta *meta = meta__ign;
-
- return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
+ return bpf_list_push_back(head, node, meta__ign, off);
}
static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
@@ -2535,16 +2572,24 @@ static int __bpf_rbtree_add(struct bpf_rb_root *root,
return 0;
}
-__bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
- bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
- void *meta__ign, u64 off)
+__bpf_kfunc int bpf_rbtree_add(struct bpf_rb_root *root,
+ struct bpf_rb_node *node,
+ bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
+ struct btf_struct_meta *meta,
+ u64 off)
{
- struct btf_struct_meta *meta = meta__ign;
struct bpf_rb_node_kern *n = (void *)node;
return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
}
+__bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
+ bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
+ void *meta__ign, u64 off)
+{
+ return bpf_rbtree_add(root, node, less, meta__ign, off);
+}
+
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
{
struct rb_root_cached *r = (struct rb_root_cached *)root;
@@ -4536,12 +4581,19 @@ BTF_KFUNCS_START(generic_btf_ids)
#ifdef CONFIG_CRASH_DUMP
BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
#endif
+BTF_ID_FLAGS(func, bpf_obj_new, KF_ACQUIRE | KF_RET_NULL | KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_percpu_obj_new, KF_ACQUIRE | KF_RET_NULL | KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_obj_drop, KF_RELEASE | KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_percpu_obj_drop, KF_RELEASE | KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_refcount_acquire, KF_ACQUIRE | KF_RET_NULL | KF_RCU | KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
+BTF_ID_FLAGS(func, bpf_list_push_front, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_list_push_front_impl)
+BTF_ID_FLAGS(func, bpf_list_push_back, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_list_push_back_impl)
BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
@@ -4550,6 +4602,7 @@ BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_rbtree_add, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 01c18f4268de..25d0564de6bc 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12368,7 +12368,8 @@ enum {
KF_ARG_RES_SPIN_LOCK_ID,
KF_ARG_TASK_WORK_ID,
KF_ARG_PROG_AUX_ID,
- KF_ARG_TIMER_ID
+ KF_ARG_TIMER_ID,
+ KF_ARG_BTF_STRUCT_META,
};
BTF_ID_LIST(kf_arg_btf_ids)
@@ -12382,6 +12383,7 @@ BTF_ID(struct, bpf_res_spin_lock)
BTF_ID(struct, bpf_task_work)
BTF_ID(struct, bpf_prog_aux)
BTF_ID(struct, bpf_timer)
+BTF_ID(struct, btf_struct_meta)
static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
const struct btf_param *arg, int type)
@@ -12472,6 +12474,30 @@ static bool is_kfunc_arg_prog_aux(const struct btf *btf, const struct btf_param
return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_PROG_AUX_ID);
}
+/*
+ * A kfunc with KF_IMPLICIT_ARGS has two prototypes in BTF:
+ * - the _impl prototype with full arg list (this is meta->func_proto)
+ * - the BPF API prototype w/o implicit args (func->type in BTF)
+ * To determine whether an argument is implicit, we compare its position
+ * against the number of arguments of both prototypes.
+ */
+static bool is_kfunc_arg_implicit(const struct bpf_kfunc_call_arg_meta *meta, u32 arg_idx)
+{
+ const struct btf_type *func, *func_proto;
+ u32 argn, full_argn;
+
+ if (!(meta->kfunc_flags & KF_IMPLICIT_ARGS))
+ return false;
+
+ full_argn = btf_type_vlen(meta->func_proto);
+
+ func = btf_type_by_id(meta->btf, meta->func_id);
+ func_proto = btf_type_by_id(meta->btf, func->type);
+ argn = btf_type_vlen(func_proto);
+
+ return argn <= arg_idx && arg_idx < full_argn;
+}
+
/* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
const struct btf *btf,
@@ -12594,6 +12620,14 @@ enum special_kfunc_type {
KF_bpf_session_is_return,
KF_bpf_stream_vprintk,
KF_bpf_stream_print_stack,
+ KF_bpf_obj_new,
+ KF_bpf_percpu_obj_new,
+ KF_bpf_obj_drop,
+ KF_bpf_percpu_obj_drop,
+ KF_bpf_refcount_acquire,
+ KF_bpf_list_push_front,
+ KF_bpf_list_push_back,
+ KF_bpf_rbtree_add,
};
BTF_ID_LIST(special_kfunc_list)
@@ -12674,6 +12708,58 @@ BTF_ID(func, bpf_arena_reserve_pages)
BTF_ID(func, bpf_session_is_return)
BTF_ID(func, bpf_stream_vprintk)
BTF_ID(func, bpf_stream_print_stack)
+BTF_ID(func, bpf_obj_new)
+BTF_ID(func, bpf_percpu_obj_new)
+BTF_ID(func, bpf_obj_drop)
+BTF_ID(func, bpf_percpu_obj_drop)
+BTF_ID(func, bpf_refcount_acquire)
+BTF_ID(func, bpf_list_push_front)
+BTF_ID(func, bpf_list_push_back)
+BTF_ID(func, bpf_rbtree_add)
+
+static bool is_bpf_obj_new_kfunc(u32 func_id)
+{
+ return func_id == special_kfunc_list[KF_bpf_obj_new] ||
+ func_id == special_kfunc_list[KF_bpf_obj_new_impl];
+}
+
+static bool is_bpf_percpu_obj_new_kfunc(u32 func_id)
+{
+ return func_id == special_kfunc_list[KF_bpf_percpu_obj_new] ||
+ func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl];
+}
+
+static bool is_bpf_obj_drop_kfunc(u32 func_id)
+{
+ return func_id == special_kfunc_list[KF_bpf_obj_drop] ||
+ func_id == special_kfunc_list[KF_bpf_obj_drop_impl];
+}
+
+static bool is_bpf_percpu_obj_drop_kfunc(u32 func_id)
+{
+ return func_id == special_kfunc_list[KF_bpf_percpu_obj_drop] ||
+ func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl];
+}
+
+static bool is_bpf_refcount_acquire_kfunc(u32 func_id)
+{
+ return func_id == special_kfunc_list[KF_bpf_refcount_acquire] ||
+ func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
+}
+
+static bool is_bpf_list_push_kfunc(u32 func_id)
+{
+ return func_id == special_kfunc_list[KF_bpf_list_push_front] ||
+ func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
+ func_id == special_kfunc_list[KF_bpf_list_push_back] ||
+ func_id == special_kfunc_list[KF_bpf_list_push_back_impl];
+}
+
+static bool is_bpf_rbtree_add_kfunc(u32 func_id)
+{
+ return func_id == special_kfunc_list[KF_bpf_rbtree_add] ||
+ func_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
+}
static bool is_task_work_add_kfunc(u32 func_id)
{
@@ -12683,10 +12769,8 @@ static bool is_task_work_add_kfunc(u32 func_id)
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
{
- if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
- meta->arg_owning_ref) {
+ if (is_bpf_refcount_acquire_kfunc(meta->func_id) && meta->arg_owning_ref)
return false;
- }
return meta->kfunc_flags & KF_RET_NULL;
}
@@ -13074,8 +13158,7 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_
static bool is_bpf_list_api_kfunc(u32 btf_id)
{
- return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
- btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
+ return is_bpf_list_push_kfunc(btf_id) ||
btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
btf_id == special_kfunc_list[KF_bpf_list_front] ||
@@ -13084,7 +13167,7 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
{
- return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
+ return is_bpf_rbtree_add_kfunc(btf_id) ||
btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
btf_id == special_kfunc_list[KF_bpf_rbtree_first] ||
btf_id == special_kfunc_list[KF_bpf_rbtree_root] ||
@@ -13101,8 +13184,9 @@ static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
static bool is_bpf_graph_api_kfunc(u32 btf_id)
{
- return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) ||
- btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
+ return is_bpf_list_api_kfunc(btf_id) ||
+ is_bpf_rbtree_api_kfunc(btf_id) ||
+ is_bpf_refcount_acquire_kfunc(btf_id);
}
static bool is_bpf_res_spin_lock_kfunc(u32 btf_id)
@@ -13135,7 +13219,7 @@ static bool kfunc_spin_allowed(u32 btf_id)
static bool is_sync_callback_calling_kfunc(u32 btf_id)
{
- return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
+ return is_bpf_rbtree_add_kfunc(btf_id);
}
static bool is_async_callback_calling_kfunc(u32 btf_id)
@@ -13199,12 +13283,11 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
switch (node_field_type) {
case BPF_LIST_NODE:
- ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl]);
+ ret = is_bpf_list_push_kfunc(kfunc_btf_id);
break;
case BPF_RB_NODE:
- ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
+ ret = (is_bpf_rbtree_add_kfunc(kfunc_btf_id) ||
+ kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_right]);
break;
@@ -13421,11 +13504,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
bool is_ret_buf_sz = false;
int kf_arg_type;
- t = btf_type_skip_modifiers(btf, args[i].type, NULL);
-
- if (is_kfunc_arg_ignore(btf, &args[i]))
- continue;
-
if (is_kfunc_arg_prog_aux(btf, &args[i])) {
/* Reject repeated use bpf_prog_aux */
if (meta->arg_prog) {
@@ -13437,6 +13515,11 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
continue;
}
+ if (is_kfunc_arg_ignore(btf, &args[i]) || is_kfunc_arg_implicit(meta, i))
+ continue;
+
+ t = btf_type_skip_modifiers(btf, args[i].type, NULL);
+
if (btf_type_is_scalar(t)) {
if (reg->type != SCALAR_VALUE) {
verbose(env, "R%d is not a scalar\n", regno);
@@ -13611,13 +13694,13 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
break;
case KF_ARG_PTR_TO_ALLOC_BTF_ID:
if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) {
- if (meta->func_id != special_kfunc_list[KF_bpf_obj_drop_impl]) {
- verbose(env, "arg#%d expected for bpf_obj_drop_impl()\n", i);
+ if (!is_bpf_obj_drop_kfunc(meta->func_id)) {
+ verbose(env, "arg#%d expected for bpf_obj_drop()\n", i);
return -EINVAL;
}
} else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC | MEM_PERCPU)) {
- if (meta->func_id != special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) {
- verbose(env, "arg#%d expected for bpf_percpu_obj_drop_impl()\n", i);
+ if (!is_bpf_percpu_obj_drop_kfunc(meta->func_id)) {
+ verbose(env, "arg#%d expected for bpf_percpu_obj_drop()\n", i);
return -EINVAL;
}
} else {
@@ -13743,7 +13826,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return ret;
break;
case KF_ARG_PTR_TO_RB_NODE:
- if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+ if (is_bpf_rbtree_add_kfunc(meta->func_id)) {
if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
verbose(env, "arg#%d expected pointer to allocated object\n", i);
return -EINVAL;
@@ -13980,13 +14063,12 @@ static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_ca
if (meta->btf != btf_vmlinux)
return 0;
- if (meta->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
- meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
+ if (is_bpf_obj_new_kfunc(meta->func_id) || is_bpf_percpu_obj_new_kfunc(meta->func_id)) {
struct btf_struct_meta *struct_meta;
struct btf *ret_btf;
u32 ret_btf_id;
- if (meta->func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set)
+ if (is_bpf_obj_new_kfunc(meta->func_id) && !bpf_global_ma_set)
return -ENOMEM;
if (((u64)(u32)meta->arg_constant.value) != meta->arg_constant.value) {
@@ -14009,7 +14091,7 @@ static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_ca
return -EINVAL;
}
- if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
+ if (is_bpf_percpu_obj_new_kfunc(meta->func_id)) {
if (ret_t->size > BPF_GLOBAL_PERCPU_MA_MAX_SIZE) {
verbose(env, "bpf_percpu_obj_new type size (%d) is greater than %d\n",
ret_t->size, BPF_GLOBAL_PERCPU_MA_MAX_SIZE);
@@ -14039,7 +14121,7 @@ static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_ca
}
struct_meta = btf_find_struct_meta(ret_btf, ret_btf_id);
- if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
+ if (is_bpf_percpu_obj_new_kfunc(meta->func_id)) {
if (!__btf_type_is_scalar_struct(env, ret_btf, ret_t, 0)) {
verbose(env, "bpf_percpu_obj_new type ID argument must be of a struct of scalars\n");
return -EINVAL;
@@ -14055,12 +14137,12 @@ static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_ca
regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
regs[BPF_REG_0].btf = ret_btf;
regs[BPF_REG_0].btf_id = ret_btf_id;
- if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl])
+ if (is_bpf_percpu_obj_new_kfunc(meta->func_id))
regs[BPF_REG_0].type |= MEM_PERCPU;
insn_aux->obj_new_size = ret_t->size;
insn_aux->kptr_struct_meta = struct_meta;
- } else if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
+ } else if (is_bpf_refcount_acquire_kfunc(meta->func_id)) {
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
regs[BPF_REG_0].btf = meta->arg_btf;
@@ -14226,7 +14308,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (err < 0)
return err;
- if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+ if (is_bpf_rbtree_add_kfunc(meta.func_id)) {
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
set_rbtree_add_callback_state);
if (err) {
@@ -14340,9 +14422,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return err;
}
- if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
- meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
- meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+ if (is_bpf_list_push_kfunc(meta.func_id) || is_bpf_rbtree_add_kfunc(meta.func_id)) {
release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
insn_aux->insert_off = regs[BPF_REG_2].var_off.value;
insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id);
@@ -14390,11 +14470,10 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL);
if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) {
- /* Only exception is bpf_obj_new_impl */
if (meta.btf != btf_vmlinux ||
- (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] &&
- meta.func_id != special_kfunc_list[KF_bpf_percpu_obj_new_impl] &&
- meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) {
+ (!is_bpf_obj_new_kfunc(meta.func_id) &&
+ !is_bpf_percpu_obj_new_kfunc(meta.func_id) &&
+ !is_bpf_refcount_acquire_kfunc(meta.func_id))) {
verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
return -EINVAL;
}
@@ -14505,8 +14584,8 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
regs[BPF_REG_0].id = ++env->id_gen;
} else if (btf_type_is_void(t)) {
if (meta.btf == btf_vmlinux) {
- if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
- meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) {
+ if (is_bpf_obj_drop_kfunc(meta.func_id) ||
+ is_bpf_percpu_obj_drop_kfunc(meta.func_id)) {
insn_aux->kptr_struct_meta =
btf_find_struct_meta(meta.arg_btf,
meta.arg_btf_id);
@@ -23312,13 +23391,12 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (!bpf_jit_supports_far_kfunc_call())
insn->imm = BPF_CALL_IMM(desc->addr);
- if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
- desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
+ if (is_bpf_obj_new_kfunc(desc->func_id) || is_bpf_percpu_obj_new_kfunc(desc->func_id)) {
struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size;
- if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) {
+ if (is_bpf_percpu_obj_new_kfunc(desc->func_id) && kptr_struct_meta) {
verifier_bug(env, "NULL kptr_struct_meta expected at insn_idx %d",
insn_idx);
return -EFAULT;
@@ -23329,20 +23407,19 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
insn_buf[2] = addr[1];
insn_buf[3] = *insn;
*cnt = 4;
- } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
- desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] ||
- desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
+ } else if (is_bpf_obj_drop_kfunc(desc->func_id) ||
+ is_bpf_percpu_obj_drop_kfunc(desc->func_id) ||
+ is_bpf_refcount_acquire_kfunc(desc->func_id)) {
struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
- if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) {
+ if (is_bpf_percpu_obj_drop_kfunc(desc->func_id) && kptr_struct_meta) {
verifier_bug(env, "NULL kptr_struct_meta expected at insn_idx %d",
insn_idx);
return -EFAULT;
}
- if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
- !kptr_struct_meta) {
+ if (is_bpf_refcount_acquire_kfunc(desc->func_id) && !kptr_struct_meta) {
verifier_bug(env, "kptr_struct_meta expected at insn_idx %d",
insn_idx);
return -EFAULT;
@@ -23352,15 +23429,14 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
insn_buf[1] = addr[1];
insn_buf[2] = *insn;
*cnt = 3;
- } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
- desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
- desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+ } else if (is_bpf_list_push_kfunc(desc->func_id) ||
+ is_bpf_rbtree_add_kfunc(desc->func_id)) {
struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
int struct_meta_reg = BPF_REG_3;
int node_offset_reg = BPF_REG_4;
/* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
- if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+ if (is_bpf_rbtree_add_kfunc(desc->func_id)) {
struct_meta_reg = BPF_REG_4;
node_offset_reg = BPF_REG_5;
}
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index 5208f650080f..f8a91fa7584f 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -1065,6 +1065,7 @@ static bool is_kf_implicit_arg(const struct btf *btf, const struct btf_param *p)
{
static const char *const kf_implicit_arg_types[] = {
"bpf_prog_aux",
+ "btf_struct_meta",
};
const struct btf_type *t;
const char *name;
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
index f2b8eb2ff76f..81813c724fa9 100644
--- a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
@@ -110,7 +110,7 @@ int BPF_PROG(test_array_map_3)
}
SEC("?fentry.s/bpf_fentry_test1")
-__failure __msg("arg#0 expected for bpf_percpu_obj_drop_impl()")
+__failure __msg("arg#0 expected for bpf_percpu_obj_drop()")
int BPF_PROG(test_array_map_4)
{
struct val_t __percpu_kptr *p;
@@ -124,7 +124,7 @@ int BPF_PROG(test_array_map_4)
}
SEC("?fentry.s/bpf_fentry_test1")
-__failure __msg("arg#0 expected for bpf_obj_drop_impl()")
+__failure __msg("arg#0 expected for bpf_obj_drop()")
int BPF_PROG(test_array_map_5)
{
struct val_t *p;
--
2.53.0
^ permalink raw reply related [flat|nested] 21+ messages in thread* [PATCH bpf-next v3 2/2] selftests/bpf: Update kfuncs using btf_struct_meta to new variants 2026-03-18 23:42 [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS Ihor Solodrai @ 2026-03-18 23:42 ` Ihor Solodrai 2026-03-19 12:30 ` Jiri Olsa 2026-03-20 14:50 ` Mykyta Yatsenko 2026-03-19 12:25 ` [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS Jiri Olsa ` (2 subsequent siblings) 3 siblings, 2 replies; 21+ messages in thread From: Ihor Solodrai @ 2026-03-18 23:42 UTC (permalink / raw) To: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman Cc: bpf, kernel-team Update selftests to use the new non-_impl kfuncs marked with KF_IMPLICIT_ARGS by removing redundant declarations and macros from bpf_experimental.h (the new kfuncs are present in the vmlinux.h) and updating relevant callsites. Fix spin_lock verifier-log matching for lock_id_kptr_preserve by accepting variable instruction numbers. The calls to kfuncs with implicit arguments do not have register moves (e.g. r5 = 0) corresponding to dummy arguments anymore, so the order of instructions has shifted. Signed-off-by: Ihor Solodrai <ihor.solodrai@linux.dev> --- .../testing/selftests/bpf/bpf_experimental.h | 156 +----------------- .../selftests/bpf/prog_tests/spin_lock.c | 5 +- .../selftests/bpf/progs/kptr_xchg_inline.c | 4 +- 3 files changed, 9 insertions(+), 156 deletions(-) diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index 44466acf8083..2234bd6bc9d3 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -8,156 +8,11 @@ #define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node))) -/* Description - * Allocates an object of the type represented by 'local_type_id' in - * program BTF. 
User may use the bpf_core_type_id_local macro to pass the - * type ID of a struct in program BTF. - * - * The 'local_type_id' parameter must be a known constant. - * The 'meta' parameter is rewritten by the verifier, no need for BPF - * program to set it. - * Returns - * A pointer to an object of the type corresponding to the passed in - * 'local_type_id', or NULL on failure. - */ -extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym; - -/* Convenience macro to wrap over bpf_obj_new_impl */ -#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL)) - -/* Description - * Free an allocated object. All fields of the object that require - * destruction will be destructed before the storage is freed. - * - * The 'meta' parameter is rewritten by the verifier, no need for BPF - * program to set it. - * Returns - * Void. - */ -extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym; - -/* Convenience macro to wrap over bpf_obj_drop_impl */ -#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL) - -/* Description - * Increment the refcount on a refcounted local kptr, turning the - * non-owning reference input into an owning reference in the process. - * - * The 'meta' parameter is rewritten by the verifier, no need for BPF - * program to set it. - * Returns - * An owning reference to the object pointed to by 'kptr' - */ -extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym; - -/* Convenience macro to wrap over bpf_refcount_acquire_impl */ -#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL) - -/* Description - * Add a new entry to the beginning of the BPF linked list. 
- * - * The 'meta' and 'off' parameters are rewritten by the verifier, no need - * for BPF programs to set them - * Returns - * 0 if the node was successfully added - * -EINVAL if the node wasn't added because it's already in a list - */ -extern int bpf_list_push_front_impl(struct bpf_list_head *head, - struct bpf_list_node *node, - void *meta, __u64 off) __ksym; - -/* Convenience macro to wrap over bpf_list_push_front_impl */ -#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0) - -/* Description - * Add a new entry to the end of the BPF linked list. - * - * The 'meta' and 'off' parameters are rewritten by the verifier, no need - * for BPF programs to set them - * Returns - * 0 if the node was successfully added - * -EINVAL if the node wasn't added because it's already in a list - */ -extern int bpf_list_push_back_impl(struct bpf_list_head *head, - struct bpf_list_node *node, - void *meta, __u64 off) __ksym; - -/* Convenience macro to wrap over bpf_list_push_back_impl */ -#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0) - -/* Description - * Remove the entry at the beginning of the BPF linked list. - * Returns - * Pointer to bpf_list_node of deleted entry, or NULL if list is empty. - */ -extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym; - -/* Description - * Remove the entry at the end of the BPF linked list. - * Returns - * Pointer to bpf_list_node of deleted entry, or NULL if list is empty. 
- */ -extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym; - -/* Description - * Remove 'node' from rbtree with root 'root' - * Returns - * Pointer to the removed node, or NULL if 'root' didn't contain 'node' - */ -extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, - struct bpf_rb_node *node) __ksym; - -/* Description - * Add 'node' to rbtree with root 'root' using comparator 'less' - * - * The 'meta' and 'off' parameters are rewritten by the verifier, no need - * for BPF programs to set them - * Returns - * 0 if the node was successfully added - * -EINVAL if the node wasn't added because it's already in a tree - */ -extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, - bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), - void *meta, __u64 off) __ksym; - -/* Convenience macro to wrap over bpf_rbtree_add_impl */ -#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0) +/* Convenience macro to wrap over bpf_obj_new */ +#define bpf_obj_new(type) ((type *)bpf_obj_new(bpf_core_type_id_local(type))) -/* Description - * Return the first (leftmost) node in input tree - * Returns - * Pointer to the node, which is _not_ removed from the tree. If the tree - * contains no nodes, returns NULL. - */ -extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym; - -/* Description - * Allocates a percpu object of the type represented by 'local_type_id' in - * program BTF. User may use the bpf_core_type_id_local macro to pass the - * type ID of a struct in program BTF. - * - * The 'local_type_id' parameter must be a known constant. - * The 'meta' parameter is rewritten by the verifier, no need for BPF - * program to set it. - * Returns - * A pointer to a percpu object of the type corresponding to the passed in - * 'local_type_id', or NULL on failure. 
- */ -extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym; - -/* Convenience macro to wrap over bpf_percpu_obj_new_impl */ -#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL)) - -/* Description - * Free an allocated percpu object. All fields of the object that require - * destruction will be destructed before the storage is freed. - * - * The 'meta' parameter is rewritten by the verifier, no need for BPF - * program to set it. - * Returns - * Void. - */ -extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym; +/* Convenience macro to wrap over bpf_percpu_obj_new */ +#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new(bpf_core_type_id_local(type))) struct bpf_iter_task_vma; @@ -167,9 +22,6 @@ extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it, extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym; extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym; -/* Convenience macro to wrap over bpf_obj_drop_impl */ -#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL) - /* Description * Throw a BPF exception from the program, immediately terminating its * execution and unwinding the stack. 
The supplied 'cookie' parameter diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c index 254fbfeab06a..bbe476f4c47d 100644 --- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c @@ -13,8 +13,9 @@ static struct { const char *err_msg; } spin_lock_fail_tests[] = { { "lock_id_kptr_preserve", - "5: (bf) r1 = r0 ; R0=ptr_foo(id=2,ref_obj_id=2) " - "R1=ptr_foo(id=2,ref_obj_id=2) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n" + "[0-9]\\+: (bf) r1 = r0 ; R0=ptr_foo(id=2,ref_obj_id=2)" + " R1=ptr_foo(id=2,ref_obj_id=2) refs=2\n" + "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n" "R1 type=ptr_ expected=percpu_ptr_" }, { "lock_id_global_zero", "; R1=map_value(map=.data.A,ks=4,vs=4)\n2: (85) call bpf_this_cpu_ptr#154\n" diff --git a/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c index 2414ac20b6d5..ca5943166057 100644 --- a/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c +++ b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c @@ -25,14 +25,14 @@ __naked int kptr_xchg_inline(void) "if r0 == 0 goto 1f;" "r1 = r0;" "r2 = 0;" - "call %[bpf_obj_drop_impl];" + "call %[bpf_obj_drop];" "1:" "r0 = 0;" "exit;" : : __imm_addr(ptr), __imm(bpf_kptr_xchg), - __imm(bpf_obj_drop_impl) + __imm(bpf_obj_drop) : __clobber_all ); } -- 2.53.0 ^ permalink raw reply related [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 2/2] selftests/bpf: Update kfuncs using btf_struct_meta to new variants 2026-03-18 23:42 ` [PATCH bpf-next v3 2/2] selftests/bpf: Update kfuncs using btf_struct_meta to new variants Ihor Solodrai @ 2026-03-19 12:30 ` Jiri Olsa 2026-03-19 20:43 ` Ihor Solodrai 2026-03-20 14:50 ` Mykyta Yatsenko 1 sibling, 1 reply; 21+ messages in thread From: Jiri Olsa @ 2026-03-19 12:30 UTC (permalink / raw) To: Ihor Solodrai Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, kernel-team On Wed, Mar 18, 2026 at 04:42:10PM -0700, Ihor Solodrai wrote: > Update selftests to use the new non-_impl kfuncs marked with > KF_IMPLICIT_ARGS by removing redundant declarations and macros from > bpf_experimental.h (the new kfuncs are present in the vmlinux.h) and > updating relevant callsites. > > Fix spin_lock verifier-log matching for lock_id_kptr_preserve by > accepting variable instruction numbers. The calls to kfuncs with > implicit arguments do not have register moves (e.g. r5 = 0) > corresponding to dummy arguments anymore, so the order of instructions > has shifted. > > Signed-off-by: Ihor Solodrai <ihor.solodrai@linux.dev> > --- > .../testing/selftests/bpf/bpf_experimental.h | 156 +----------------- > .../selftests/bpf/prog_tests/spin_lock.c | 5 +- > .../selftests/bpf/progs/kptr_xchg_inline.c | 4 +- > 3 files changed, 9 insertions(+), 156 deletions(-) > > diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h > index 44466acf8083..2234bd6bc9d3 100644 > --- a/tools/testing/selftests/bpf/bpf_experimental.h > +++ b/tools/testing/selftests/bpf/bpf_experimental.h > @@ -8,156 +8,11 @@ > > #define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node))) > > -/* Description > - * Allocates an object of the type represented by 'local_type_id' in > - * program BTF. User may use the bpf_core_type_id_local macro to pass the > - * type ID of a struct in program BTF. 
> - * > - * The 'local_type_id' parameter must be a known constant. > - * The 'meta' parameter is rewritten by the verifier, no need for BPF > - * program to set it. > - * Returns > - * A pointer to an object of the type corresponding to the passed in > - * 'local_type_id', or NULL on failure. > - */ > -extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym; > - > -/* Convenience macro to wrap over bpf_obj_new_impl */ > -#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL)) > - > -/* Description > - * Free an allocated object. All fields of the object that require > - * destruction will be destructed before the storage is freed. > - * > - * The 'meta' parameter is rewritten by the verifier, no need for BPF > - * program to set it. > - * Returns > - * Void. > - */ > -extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym; > - > -/* Convenience macro to wrap over bpf_obj_drop_impl */ > -#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL) > - > -/* Description > - * Increment the refcount on a refcounted local kptr, turning the > - * non-owning reference input into an owning reference in the process. > - * > - * The 'meta' parameter is rewritten by the verifier, no need for BPF > - * program to set it. > - * Returns > - * An owning reference to the object pointed to by 'kptr' > - */ > -extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym; > - > -/* Convenience macro to wrap over bpf_refcount_acquire_impl */ > -#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL) > - > -/* Description > - * Add a new entry to the beginning of the BPF linked list. 
> - * > - * The 'meta' and 'off' parameters are rewritten by the verifier, no need > - * for BPF programs to set them > - * Returns > - * 0 if the node was successfully added > - * -EINVAL if the node wasn't added because it's already in a list > - */ > -extern int bpf_list_push_front_impl(struct bpf_list_head *head, > - struct bpf_list_node *node, > - void *meta, __u64 off) __ksym; > - > -/* Convenience macro to wrap over bpf_list_push_front_impl */ > -#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0) > - > -/* Description > - * Add a new entry to the end of the BPF linked list. > - * > - * The 'meta' and 'off' parameters are rewritten by the verifier, no need > - * for BPF programs to set them > - * Returns > - * 0 if the node was successfully added > - * -EINVAL if the node wasn't added because it's already in a list > - */ > -extern int bpf_list_push_back_impl(struct bpf_list_head *head, > - struct bpf_list_node *node, > - void *meta, __u64 off) __ksym; > - > -/* Convenience macro to wrap over bpf_list_push_back_impl */ > -#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0) > - > -/* Description > - * Remove the entry at the beginning of the BPF linked list. > - * Returns > - * Pointer to bpf_list_node of deleted entry, or NULL if list is empty. > - */ > -extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym; > - > -/* Description > - * Remove the entry at the end of the BPF linked list. > - * Returns > - * Pointer to bpf_list_node of deleted entry, or NULL if list is empty. 
> - */ > -extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym; > - > -/* Description > - * Remove 'node' from rbtree with root 'root' > - * Returns > - * Pointer to the removed node, or NULL if 'root' didn't contain 'node' > - */ > -extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, > - struct bpf_rb_node *node) __ksym; > - > -/* Description > - * Add 'node' to rbtree with root 'root' using comparator 'less' > - * > - * The 'meta' and 'off' parameters are rewritten by the verifier, no need > - * for BPF programs to set them > - * Returns > - * 0 if the node was successfully added > - * -EINVAL if the node wasn't added because it's already in a tree > - */ > -extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, > - bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), > - void *meta, __u64 off) __ksym; > - > -/* Convenience macro to wrap over bpf_rbtree_add_impl */ > -#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0) > +/* Convenience macro to wrap over bpf_obj_new */ > +#define bpf_obj_new(type) ((type *)bpf_obj_new(bpf_core_type_id_local(type))) > > -/* Description > - * Return the first (leftmost) node in input tree > - * Returns > - * Pointer to the node, which is _not_ removed from the tree. If the tree > - * contains no nodes, returns NULL. > - */ > -extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym; > - > -/* Description > - * Allocates a percpu object of the type represented by 'local_type_id' in > - * program BTF. User may use the bpf_core_type_id_local macro to pass the > - * type ID of a struct in program BTF. > - * > - * The 'local_type_id' parameter must be a known constant. > - * The 'meta' parameter is rewritten by the verifier, no need for BPF > - * program to set it. 
> - * Returns > - * A pointer to a percpu object of the type corresponding to the passed in > - * 'local_type_id', or NULL on failure. > - */ > -extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym; > - > -/* Convenience macro to wrap over bpf_percpu_obj_new_impl */ > -#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL)) > - > -/* Description > - * Free an allocated percpu object. All fields of the object that require > - * destruction will be destructed before the storage is freed. > - * > - * The 'meta' parameter is rewritten by the verifier, no need for BPF > - * program to set it. > - * Returns > - * Void. > - */ > -extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym; > +/* Convenience macro to wrap over bpf_percpu_obj_new */ > +#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new(bpf_core_type_id_local(type))) nit, seems like a shame to remove all the comments which are still valid, maybe we could leave those externs with comments and just drop the _impl suffix jirka ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 2/2] selftests/bpf: Update kfuncs using btf_struct_meta to new variants 2026-03-19 12:30 ` Jiri Olsa @ 2026-03-19 20:43 ` Ihor Solodrai 2026-03-20 11:06 ` Jiri Olsa 0 siblings, 1 reply; 21+ messages in thread From: Ihor Solodrai @ 2026-03-19 20:43 UTC (permalink / raw) To: Jiri Olsa Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, kernel-team On 3/19/26 5:30 AM, Jiri Olsa wrote: > On Wed, Mar 18, 2026 at 04:42:10PM -0700, Ihor Solodrai wrote: >> Update selftests to use the new non-_impl kfuncs marked with >> KF_IMPLICIT_ARGS by removing redundant declarations and macros from >> bpf_experimental.h (the new kfuncs are present in the vmlinux.h) and >> updating relevant callsites. >> >> Fix spin_lock verifier-log matching for lock_id_kptr_preserve by >> accepting variable instruction numbers. The calls to kfuncs with >> implicit arguments do not have register moves (e.g. r5 = 0) >> corresponding to dummy arguments anymore, so the order of instructions >> has shifted. >> >> Signed-off-by: Ihor Solodrai <ihor.solodrai@linux.dev> >> --- >> .../testing/selftests/bpf/bpf_experimental.h | 156 +----------------- >> .../selftests/bpf/prog_tests/spin_lock.c | 5 +- >> .../selftests/bpf/progs/kptr_xchg_inline.c | 4 +- >> 3 files changed, 9 insertions(+), 156 deletions(-) >> >> diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h >> index 44466acf8083..2234bd6bc9d3 100644 >> --- a/tools/testing/selftests/bpf/bpf_experimental.h >> +++ b/tools/testing/selftests/bpf/bpf_experimental.h >> @@ -8,156 +8,11 @@ >> >> [...] >> - >> -/* Description >> - * Free an allocated percpu object. All fields of the object that require >> - * destruction will be destructed before the storage is freed. >> - * >> - * The 'meta' parameter is rewritten by the verifier, no need for BPF >> - * program to set it. >> - * Returns >> - * Void. 
>> - */
>> -extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym;
>> +/* Convenience macro to wrap over bpf_percpu_obj_new */
>> +#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new(bpf_core_type_id_local(type)))
>
> nit, seems like a shame to remove all the comments which are still
> valid, maybe we could leave those externs with comments and just
> drop the _impl suffix

I assumed they were copy-pasted from somewhere, but apparently not.

How about we move the useful comments to kernel/helpers.c, where
the kfuncs are defined?

>
> jirka

^ permalink raw reply	[flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 2/2] selftests/bpf: Update kfuncs using btf_struct_meta to new variants 2026-03-19 20:43 ` Ihor Solodrai @ 2026-03-20 11:06 ` Jiri Olsa 0 siblings, 0 replies; 21+ messages in thread From: Jiri Olsa @ 2026-03-20 11:06 UTC (permalink / raw) To: Ihor Solodrai Cc: Jiri Olsa, Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, kernel-team On Thu, Mar 19, 2026 at 01:43:35PM -0700, Ihor Solodrai wrote: > On 3/19/26 5:30 AM, Jiri Olsa wrote: > > On Wed, Mar 18, 2026 at 04:42:10PM -0700, Ihor Solodrai wrote: > >> Update selftests to use the new non-_impl kfuncs marked with > >> KF_IMPLICIT_ARGS by removing redundant declarations and macros from > >> bpf_experimental.h (the new kfuncs are present in the vmlinux.h) and > >> updating relevant callsites. > >> > >> Fix spin_lock verifier-log matching for lock_id_kptr_preserve by > >> accepting variable instruction numbers. The calls to kfuncs with > >> implicit arguments do not have register moves (e.g. r5 = 0) > >> corresponding to dummy arguments anymore, so the order of instructions > >> has shifted. > >> > >> Signed-off-by: Ihor Solodrai <ihor.solodrai@linux.dev> > >> --- > >> .../testing/selftests/bpf/bpf_experimental.h | 156 +----------------- > >> .../selftests/bpf/prog_tests/spin_lock.c | 5 +- > >> .../selftests/bpf/progs/kptr_xchg_inline.c | 4 +- > >> 3 files changed, 9 insertions(+), 156 deletions(-) > >> > >> diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h > >> index 44466acf8083..2234bd6bc9d3 100644 > >> --- a/tools/testing/selftests/bpf/bpf_experimental.h > >> +++ b/tools/testing/selftests/bpf/bpf_experimental.h > >> @@ -8,156 +8,11 @@ > >> > >> [...] > >> - > >> -/* Description > >> - * Free an allocated percpu object. All fields of the object that require > >> - * destruction will be destructed before the storage is freed. 
> >> - * > >> - * The 'meta' parameter is rewritten by the verifier, no need for BPF > >> - * program to set it. > >> - * Returns > >> - * Void. > >> - */ > >> -extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym; > >> +/* Convenience macro to wrap over bpf_percpu_obj_new */ > >> +#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new(bpf_core_type_id_local(type))) > > > > nit, seems like a shame to remove all the comments which are still > > valid, maybe we could leave those externs with comments and just > > drop the _impl suffix > > I assumed they were copy-pasted from somewhere, but apparently no. > > How about we move the useful comments to kernel/helpers.c, where > the kfuncs are defined? sounds good, thanks jirka ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 2/2] selftests/bpf: Update kfuncs using btf_struct_meta to new variants 2026-03-18 23:42 ` [PATCH bpf-next v3 2/2] selftests/bpf: Update kfuncs using btf_struct_meta to new variants Ihor Solodrai 2026-03-19 12:30 ` Jiri Olsa @ 2026-03-20 14:50 ` Mykyta Yatsenko 1 sibling, 0 replies; 21+ messages in thread From: Mykyta Yatsenko @ 2026-03-20 14:50 UTC (permalink / raw) To: Ihor Solodrai, Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman Cc: bpf, kernel-team Ihor Solodrai <ihor.solodrai@linux.dev> writes: > Update selftests to use the new non-_impl kfuncs marked with > KF_IMPLICIT_ARGS by removing redundant declarations and macros from > bpf_experimental.h (the new kfuncs are present in the vmlinux.h) and > updating relevant callsites. > > Fix spin_lock verifier-log matching for lock_id_kptr_preserve by > accepting variable instruction numbers. The calls to kfuncs with > implicit arguments do not have register moves (e.g. r5 = 0) > corresponding to dummy arguments anymore, so the order of instructions > has shifted. > > Signed-off-by: Ihor Solodrai <ihor.solodrai@linux.dev> > --- This patch deletes unnecessary declarations from bpf_experimental.h and aligns few tests to use non-impl kfuncs. 
Acked-by: Mykyta Yatsenko <yatsenko@meta.com> > .../testing/selftests/bpf/bpf_experimental.h | 156 +----------------- > .../selftests/bpf/prog_tests/spin_lock.c | 5 +- > .../selftests/bpf/progs/kptr_xchg_inline.c | 4 +- > 3 files changed, 9 insertions(+), 156 deletions(-) > > diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h > index 44466acf8083..2234bd6bc9d3 100644 > --- a/tools/testing/selftests/bpf/bpf_experimental.h > +++ b/tools/testing/selftests/bpf/bpf_experimental.h > @@ -8,156 +8,11 @@ > > #define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node))) > > -/* Description > - * Allocates an object of the type represented by 'local_type_id' in > - * program BTF. User may use the bpf_core_type_id_local macro to pass the > - * type ID of a struct in program BTF. > - * > - * The 'local_type_id' parameter must be a known constant. > - * The 'meta' parameter is rewritten by the verifier, no need for BPF > - * program to set it. > - * Returns > - * A pointer to an object of the type corresponding to the passed in > - * 'local_type_id', or NULL on failure. > - */ > -extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym; > - > -/* Convenience macro to wrap over bpf_obj_new_impl */ > -#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL)) > - > -/* Description > - * Free an allocated object. All fields of the object that require > - * destruction will be destructed before the storage is freed. > - * > - * The 'meta' parameter is rewritten by the verifier, no need for BPF > - * program to set it. > - * Returns > - * Void. 
> - */ > -extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym; > - > -/* Convenience macro to wrap over bpf_obj_drop_impl */ > -#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL) > - > -/* Description > - * Increment the refcount on a refcounted local kptr, turning the > - * non-owning reference input into an owning reference in the process. > - * > - * The 'meta' parameter is rewritten by the verifier, no need for BPF > - * program to set it. > - * Returns > - * An owning reference to the object pointed to by 'kptr' > - */ > -extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym; > - > -/* Convenience macro to wrap over bpf_refcount_acquire_impl */ > -#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL) > - > -/* Description > - * Add a new entry to the beginning of the BPF linked list. > - * > - * The 'meta' and 'off' parameters are rewritten by the verifier, no need > - * for BPF programs to set them > - * Returns > - * 0 if the node was successfully added > - * -EINVAL if the node wasn't added because it's already in a list > - */ > -extern int bpf_list_push_front_impl(struct bpf_list_head *head, > - struct bpf_list_node *node, > - void *meta, __u64 off) __ksym; > - > -/* Convenience macro to wrap over bpf_list_push_front_impl */ > -#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0) > - > -/* Description > - * Add a new entry to the end of the BPF linked list. 
> - * > - * The 'meta' and 'off' parameters are rewritten by the verifier, no need > - * for BPF programs to set them > - * Returns > - * 0 if the node was successfully added > - * -EINVAL if the node wasn't added because it's already in a list > - */ > -extern int bpf_list_push_back_impl(struct bpf_list_head *head, > - struct bpf_list_node *node, > - void *meta, __u64 off) __ksym; > - > -/* Convenience macro to wrap over bpf_list_push_back_impl */ > -#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0) > - > -/* Description > - * Remove the entry at the beginning of the BPF linked list. > - * Returns > - * Pointer to bpf_list_node of deleted entry, or NULL if list is empty. > - */ > -extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym; > - > -/* Description > - * Remove the entry at the end of the BPF linked list. > - * Returns > - * Pointer to bpf_list_node of deleted entry, or NULL if list is empty. > - */ > -extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym; > - > -/* Description > - * Remove 'node' from rbtree with root 'root' > - * Returns > - * Pointer to the removed node, or NULL if 'root' didn't contain 'node' > - */ > -extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, > - struct bpf_rb_node *node) __ksym; > - > -/* Description > - * Add 'node' to rbtree with root 'root' using comparator 'less' > - * > - * The 'meta' and 'off' parameters are rewritten by the verifier, no need > - * for BPF programs to set them > - * Returns > - * 0 if the node was successfully added > - * -EINVAL if the node wasn't added because it's already in a tree > - */ > -extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, > - bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), > - void *meta, __u64 off) __ksym; > - > -/* Convenience macro to wrap over bpf_rbtree_add_impl */ > -#define bpf_rbtree_add(head, node, less) 
bpf_rbtree_add_impl(head, node, less, NULL, 0) > +/* Convenience macro to wrap over bpf_obj_new */ > +#define bpf_obj_new(type) ((type *)bpf_obj_new(bpf_core_type_id_local(type))) > > -/* Description > - * Return the first (leftmost) node in input tree > - * Returns > - * Pointer to the node, which is _not_ removed from the tree. If the tree > - * contains no nodes, returns NULL. > - */ > -extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym; > - > -/* Description > - * Allocates a percpu object of the type represented by 'local_type_id' in > - * program BTF. User may use the bpf_core_type_id_local macro to pass the > - * type ID of a struct in program BTF. > - * > - * The 'local_type_id' parameter must be a known constant. > - * The 'meta' parameter is rewritten by the verifier, no need for BPF > - * program to set it. > - * Returns > - * A pointer to a percpu object of the type corresponding to the passed in > - * 'local_type_id', or NULL on failure. > - */ > -extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym; > - > -/* Convenience macro to wrap over bpf_percpu_obj_new_impl */ > -#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL)) > - > -/* Description > - * Free an allocated percpu object. All fields of the object that require > - * destruction will be destructed before the storage is freed. > - * > - * The 'meta' parameter is rewritten by the verifier, no need for BPF > - * program to set it. > - * Returns > - * Void. 
> - */ > -extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym; > +/* Convenience macro to wrap over bpf_percpu_obj_new */ > +#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new(bpf_core_type_id_local(type))) > > struct bpf_iter_task_vma; > > @@ -167,9 +22,6 @@ extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it, > extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym; > extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym; > > -/* Convenience macro to wrap over bpf_obj_drop_impl */ > -#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL) > - > /* Description > * Throw a BPF exception from the program, immediately terminating its > * execution and unwinding the stack. The supplied 'cookie' parameter > diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c > index 254fbfeab06a..bbe476f4c47d 100644 > --- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c > +++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c > @@ -13,8 +13,9 @@ static struct { > const char *err_msg; > } spin_lock_fail_tests[] = { > { "lock_id_kptr_preserve", > - "5: (bf) r1 = r0 ; R0=ptr_foo(id=2,ref_obj_id=2) " > - "R1=ptr_foo(id=2,ref_obj_id=2) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n" > + "[0-9]\\+: (bf) r1 = r0 ; R0=ptr_foo(id=2,ref_obj_id=2)" > + " R1=ptr_foo(id=2,ref_obj_id=2) refs=2\n" > + "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n" > "R1 type=ptr_ expected=percpu_ptr_" }, > { "lock_id_global_zero", > "; R1=map_value(map=.data.A,ks=4,vs=4)\n2: (85) call bpf_this_cpu_ptr#154\n" > diff --git a/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c > index 2414ac20b6d5..ca5943166057 100644 > --- a/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c > +++ b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c > @@ -25,14 +25,14 @@ __naked int 
kptr_xchg_inline(void) > "if r0 == 0 goto 1f;" > "r1 = r0;" > "r2 = 0;" > - "call %[bpf_obj_drop_impl];" > + "call %[bpf_obj_drop];" > "1:" > "r0 = 0;" > "exit;" > : > : __imm_addr(ptr), > __imm(bpf_kptr_xchg), > - __imm(bpf_obj_drop_impl) > + __imm(bpf_obj_drop) > : __clobber_all > ); > } > -- > 2.53.0 ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-18 23:42 [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS Ihor Solodrai 2026-03-18 23:42 ` [PATCH bpf-next v3 2/2] selftests/bpf: Update kfuncs using btf_struct_meta to new variants Ihor Solodrai @ 2026-03-19 12:25 ` Jiri Olsa 2026-03-19 20:37 ` Ihor Solodrai 2026-03-20 15:49 ` Mykyta Yatsenko 2026-03-21 20:27 ` Alexei Starovoitov 3 siblings, 1 reply; 21+ messages in thread From: Jiri Olsa @ 2026-03-19 12:25 UTC (permalink / raw) To: Ihor Solodrai Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, kernel-team On Wed, Mar 18, 2026 at 04:42:09PM -0700, Ihor Solodrai wrote: SNIP > > +/* > + * A kfunc with KF_IMPLICIT_ARGS has two prototypes in BTF: > + * - the _impl prototype with full arg list (this is meta->func_proto) > + * - the BPF API prototype w/o implicit args (func->type in BTF) > + * To determine whether an argument is implicit, we compare its position > + * against the number of arguments of both prototypes. > + */ > +static bool is_kfunc_arg_implicit(const struct bpf_kfunc_call_arg_meta *meta, u32 arg_idx) > +{ > + const struct btf_type *func, *func_proto; > + u32 argn, full_argn; > + > + if (!(meta->kfunc_flags & KF_IMPLICIT_ARGS)) > + return false; > + > + full_argn = btf_type_vlen(meta->func_proto); > + > + func = btf_type_by_id(meta->btf, meta->func_id); > + func_proto = btf_type_by_id(meta->btf, func->type); > + argn = btf_type_vlen(func_proto); > + > + return argn <= arg_idx && arg_idx < full_argn; hi, I understand above is faster, but should we rather check for exact types with something like: __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_PROG_AUX_ID || __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_BTF_STRUCT_META jirka ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-19 12:25 ` [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS Jiri Olsa @ 2026-03-19 20:37 ` Ihor Solodrai 0 siblings, 0 replies; 21+ messages in thread From: Ihor Solodrai @ 2026-03-19 20:37 UTC (permalink / raw) To: Jiri Olsa Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, kernel-team On 3/19/26 5:25 AM, Jiri Olsa wrote: > On Wed, Mar 18, 2026 at 04:42:09PM -0700, Ihor Solodrai wrote: > > SNIP > >> >> +/* >> + * A kfunc with KF_IMPLICIT_ARGS has two prototypes in BTF: >> + * - the _impl prototype with full arg list (this is meta->func_proto) >> + * - the BPF API prototype w/o implicit args (func->type in BTF) >> + * To determine whether an argument is implicit, we compare its position >> + * against the number of arguments of both prototypes. >> + */ >> +static bool is_kfunc_arg_implicit(const struct bpf_kfunc_call_arg_meta *meta, u32 arg_idx) >> +{ >> + const struct btf_type *func, *func_proto; >> + u32 argn, full_argn; >> + >> + if (!(meta->kfunc_flags & KF_IMPLICIT_ARGS)) >> + return false; >> + >> + full_argn = btf_type_vlen(meta->func_proto); >> + >> + func = btf_type_by_id(meta->btf, meta->func_id); >> + func_proto = btf_type_by_id(meta->btf, func->type); >> + argn = btf_type_vlen(func_proto); >> + >> + return argn <= arg_idx && arg_idx < full_argn; > > hi, Hi Jiri, thank you for taking a look. > I understand above is faster, but should we rather check for exact > types with something like: > > __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_PROG_AUX_ID || > __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_BTF_STRUCT_META This is an open question, see my discussion with Andrii [1]. IMO we don't have to enforce the argument type in is_kfunc_arg_implicit(). The only way I can see this check causing problems is by an invalid kfunc declaration, or a relevant bug in resolve_btfids causing bad/wrong BTF. 
My understanding is that verifier can trust kernel's BTF as long as it's valid BTF. But let me know if I'm missing something here. [1] https://lore.kernel.org/bpf/3d069965-2992-421f-bb94-827bcb177f17@linux.dev/ > > jirka ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-18 23:42 [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS Ihor Solodrai 2026-03-18 23:42 ` [PATCH bpf-next v3 2/2] selftests/bpf: Update kfuncs using btf_struct_meta to new variants Ihor Solodrai 2026-03-19 12:25 ` [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS Jiri Olsa @ 2026-03-20 15:49 ` Mykyta Yatsenko 2026-03-27 0:16 ` Ihor Solodrai 2026-03-21 20:27 ` Alexei Starovoitov 3 siblings, 1 reply; 21+ messages in thread From: Mykyta Yatsenko @ 2026-03-20 15:49 UTC (permalink / raw) To: Ihor Solodrai, Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman Cc: bpf, kernel-team Ihor Solodrai <ihor.solodrai@linux.dev> writes: > The following kfuncs currently accept void *meta__ign argument: > * bpf_obj_new_impl > * bpf_obj_drop_impl > * bpf_percpu_obj_new_impl > * bpf_percpu_obj_drop_impl > * bpf_refcount_acquire_impl > * bpf_list_push_front_impl > * bpf_rbtree_add_impl > > The __ign suffix is an indicator for the verifier to skip the argument > in check_kfunc_args(). Then, in fixup_kfunc_call() the verifier may > set the value of this argument to struct btf_struct_meta * > kptr_struct_meta from insn_aux_data. > > BPF programs must pass a dummy NULL value when calling these kfuncs. > > Additionally, the list and rbtree _impl kfuncs also accept an implicit > u64 argument, which doesn't require __ign suffix because it's a > scalar, and BPF programs explicitly pass 0. > > Add new kfuncs with KF_IMPLICIT_ARGS [1], that correspond to each > _impl kfunc accepting meta__ign. The existing _impl kfuncs remain > unchanged for backwards compatibility. > > To support this, add "btf_struct_meta" to the list of recognized > implicit argument types in resolve_btfids. 
> > Implement is_kfunc_arg_implicit() in the verifier, that determines > implicit args by inspecting both (_impl and non-_impl) BTF prototypes > of the kfunc. > > Update the special_kfunc_list in the verifier and relevant checks to > support both the old _impl and the new KF_IMPLICIT_ARGS variants of > btf_struct_meta users. > > [1] https://lore.kernel.org/bpf/20260120222638.3976562-1-ihor.solodrai@linux.dev/ > > Signed-off-by: Ihor Solodrai <ihor.solodrai@linux.dev> > > --- > > v1->v3: Nits suggested by AI > > v1: https://lore.kernel.org/bpf/20260312193546.192786-1-ihor.solodrai@linux.dev/ > > --- > kernel/bpf/helpers.c | 93 +++++++-- > kernel/bpf/verifier.c | 184 +++++++++++++----- > tools/bpf/resolve_btfids/main.c | 1 + > .../selftests/bpf/progs/percpu_alloc_fail.c | 4 +- > 4 files changed, 206 insertions(+), 76 deletions(-) > > diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c > index cb6d242bd093..fc4d537c0b15 100644 > --- a/kernel/bpf/helpers.c > +++ b/kernel/bpf/helpers.c > @@ -2302,9 +2302,8 @@ void bpf_rb_root_free(const struct btf_field *field, void *rb_root, > > __bpf_kfunc_start_defs(); > > -__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) > +__bpf_kfunc void *bpf_obj_new(u64 local_type_id__k, struct btf_struct_meta *meta) > { > - struct btf_struct_meta *meta = meta__ign; > u64 size = local_type_id__k; > void *p; > > @@ -2313,17 +2312,28 @@ __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) > return NULL; > if (meta) > bpf_obj_init(meta->record, p); > + > return p; > } > > -__bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign) > +__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) > +{ > + return bpf_obj_new(local_type_id__k, meta__ign); > +} > + > +__bpf_kfunc void *bpf_percpu_obj_new(u64 local_type_id__k, struct btf_struct_meta *meta) > { > u64 size = local_type_id__k; > > - /* The verifier has ensured that meta__ign must be NULL */ > + /* The 
verifier has ensured that meta must be NULL */ > return bpf_mem_alloc(&bpf_global_percpu_ma, size); > } > > +__bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign) > +{ > + return bpf_percpu_obj_new(local_type_id__k, meta__ign); > +} > + > /* Must be called under migrate_disable(), as required by bpf_mem_free */ > void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu) > { > @@ -2347,23 +2357,31 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu) > bpf_mem_free_rcu(ma, p); > } > > -__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) > +__bpf_kfunc void bpf_obj_drop(void *p__alloc, struct btf_struct_meta *meta) > { > - struct btf_struct_meta *meta = meta__ign; > void *p = p__alloc; > > __bpf_obj_drop_impl(p, meta ? meta->record : NULL, false); > } > > -__bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign) > +__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) > +{ > + return bpf_obj_drop(p__alloc, meta__ign); > +} > + > +__bpf_kfunc void bpf_percpu_obj_drop(void *p__alloc, struct btf_struct_meta *meta) > { > - /* The verifier has ensured that meta__ign must be NULL */ > + /* The verifier has ensured that meta must be NULL */ > bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc); > } > > -__bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign) > +__bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign) > +{ > + bpf_percpu_obj_drop(p__alloc, meta__ign); > +} > + > +__bpf_kfunc void *bpf_refcount_acquire(void *p__refcounted_kptr, struct btf_struct_meta *meta) > { > - struct btf_struct_meta *meta = meta__ign; > struct bpf_refcount *ref; > > /* Could just cast directly to refcount_t *, but need some code using > @@ -2379,6 +2397,11 @@ __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta > return (void *)p__refcounted_kptr; > } > > +__bpf_kfunc void 
*bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign) > +{ > + return bpf_refcount_acquire(p__refcounted_kptr, meta__ign); > +} > + > static int __bpf_list_add(struct bpf_list_node_kern *node, > struct bpf_list_head *head, > bool tail, struct btf_record *rec, u64 off) > @@ -2406,24 +2429,38 @@ static int __bpf_list_add(struct bpf_list_node_kern *node, > return 0; > } > > +__bpf_kfunc int bpf_list_push_front(struct bpf_list_head *head, > + struct bpf_list_node *node, > + struct btf_struct_meta *meta, > + u64 off) > +{ > + struct bpf_list_node_kern *n = (void *)node; > + > + return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); > +} > + > __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head, > struct bpf_list_node *node, > void *meta__ign, u64 off) > +{ > + return bpf_list_push_front(head, node, meta__ign, off); > +} > + > +__bpf_kfunc int bpf_list_push_back(struct bpf_list_head *head, > + struct bpf_list_node *node, > + struct btf_struct_meta *meta, > + u64 off) > { > struct bpf_list_node_kern *n = (void *)node; > - struct btf_struct_meta *meta = meta__ign; > > - return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); > + return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off); > } > > __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head, > struct bpf_list_node *node, > void *meta__ign, u64 off) > { > - struct bpf_list_node_kern *n = (void *)node; > - struct btf_struct_meta *meta = meta__ign; > - > - return __bpf_list_add(n, head, true, meta ? 
meta->record : NULL, off); > + return bpf_list_push_back(head, node, meta__ign, off); > } > > static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail) > @@ -2535,16 +2572,24 @@ static int __bpf_rbtree_add(struct bpf_rb_root *root, > return 0; > } > > -__bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, > - bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), > - void *meta__ign, u64 off) > +__bpf_kfunc int bpf_rbtree_add(struct bpf_rb_root *root, > + struct bpf_rb_node *node, > + bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), > + struct btf_struct_meta *meta, > + u64 off) > { > - struct btf_struct_meta *meta = meta__ign; > struct bpf_rb_node_kern *n = (void *)node; > > return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off); > } > > +__bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, > + bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), > + void *meta__ign, u64 off) > +{ > + return bpf_rbtree_add(root, node, less, meta__ign, off); > +} > + > __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) > { > struct rb_root_cached *r = (struct rb_root_cached *)root; > @@ -4536,12 +4581,19 @@ BTF_KFUNCS_START(generic_btf_ids) > #ifdef CONFIG_CRASH_DUMP > BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) > #endif > +BTF_ID_FLAGS(func, bpf_obj_new, KF_ACQUIRE | KF_RET_NULL | KF_IMPLICIT_ARGS) > BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) > +BTF_ID_FLAGS(func, bpf_percpu_obj_new, KF_ACQUIRE | KF_RET_NULL | KF_IMPLICIT_ARGS) > BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) > +BTF_ID_FLAGS(func, bpf_obj_drop, KF_RELEASE | KF_IMPLICIT_ARGS) > BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE) > +BTF_ID_FLAGS(func, bpf_percpu_obj_drop, KF_RELEASE | KF_IMPLICIT_ARGS) > BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE) > +BTF_ID_FLAGS(func, 
bpf_refcount_acquire, KF_ACQUIRE | KF_RET_NULL | KF_RCU | KF_IMPLICIT_ARGS) > BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU) > +BTF_ID_FLAGS(func, bpf_list_push_front, KF_IMPLICIT_ARGS) > BTF_ID_FLAGS(func, bpf_list_push_front_impl) > +BTF_ID_FLAGS(func, bpf_list_push_back, KF_IMPLICIT_ARGS) > BTF_ID_FLAGS(func, bpf_list_push_back_impl) > BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) > BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) > @@ -4550,6 +4602,7 @@ BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL) > BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) > BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) > BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL) > +BTF_ID_FLAGS(func, bpf_rbtree_add, KF_IMPLICIT_ARGS) > BTF_ID_FLAGS(func, bpf_rbtree_add_impl) > BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) > BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL) > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c > index 01c18f4268de..25d0564de6bc 100644 > --- a/kernel/bpf/verifier.c > +++ b/kernel/bpf/verifier.c > @@ -12368,7 +12368,8 @@ enum { > KF_ARG_RES_SPIN_LOCK_ID, > KF_ARG_TASK_WORK_ID, > KF_ARG_PROG_AUX_ID, > - KF_ARG_TIMER_ID > + KF_ARG_TIMER_ID, > + KF_ARG_BTF_STRUCT_META, > }; > > BTF_ID_LIST(kf_arg_btf_ids) > @@ -12382,6 +12383,7 @@ BTF_ID(struct, bpf_res_spin_lock) > BTF_ID(struct, bpf_task_work) > BTF_ID(struct, bpf_prog_aux) > BTF_ID(struct, bpf_timer) > +BTF_ID(struct, btf_struct_meta) > > static bool __is_kfunc_ptr_arg_type(const struct btf *btf, > const struct btf_param *arg, int type) > @@ -12472,6 +12474,30 @@ static bool is_kfunc_arg_prog_aux(const struct btf *btf, const struct btf_param > return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_PROG_AUX_ID); > } > > +/* > + * A kfunc with KF_IMPLICIT_ARGS has two prototypes in BTF: > + * - the _impl prototype with full arg list (this is meta->func_proto) > + * - the BPF API prototype w/o implicit 
args (func->type in BTF) > + * To determine whether an argument is implicit, we compare its position > + * against the number of arguments of both prototypes. > + */ > +static bool is_kfunc_arg_implicit(const struct bpf_kfunc_call_arg_meta *meta, u32 arg_idx) > +{ > + const struct btf_type *func, *func_proto; > + u32 argn, full_argn; > + > + if (!(meta->kfunc_flags & KF_IMPLICIT_ARGS)) > + return false; > + > + full_argn = btf_type_vlen(meta->func_proto); > + > + func = btf_type_by_id(meta->btf, meta->func_id); > + func_proto = btf_type_by_id(meta->btf, func->type); > + argn = btf_type_vlen(func_proto); > + > + return argn <= arg_idx && arg_idx < full_argn; The `arg_idx < full_argn` condition is not necessary, is it? arg_idx is always less than full_argn because full_argn is the number of arguments in the _impl variant of the function, which is supposed to be greater than that of the non-_impl variant that arg_idx tracks. arg_idx >= full_argn is an invariant violation, not the implicit argument condition, if I understand this right. The rest of the refactoring looks good to me. 
> +} > + > /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */ > static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env, > const struct btf *btf, > @@ -12594,6 +12620,14 @@ enum special_kfunc_type { > KF_bpf_session_is_return, > KF_bpf_stream_vprintk, > KF_bpf_stream_print_stack, > + KF_bpf_obj_new, > + KF_bpf_percpu_obj_new, > + KF_bpf_obj_drop, > + KF_bpf_percpu_obj_drop, > + KF_bpf_refcount_acquire, > + KF_bpf_list_push_front, > + KF_bpf_list_push_back, > + KF_bpf_rbtree_add, > }; > > BTF_ID_LIST(special_kfunc_list) > @@ -12674,6 +12708,58 @@ BTF_ID(func, bpf_arena_reserve_pages) > BTF_ID(func, bpf_session_is_return) > BTF_ID(func, bpf_stream_vprintk) > BTF_ID(func, bpf_stream_print_stack) > +BTF_ID(func, bpf_obj_new) > +BTF_ID(func, bpf_percpu_obj_new) > +BTF_ID(func, bpf_obj_drop) > +BTF_ID(func, bpf_percpu_obj_drop) > +BTF_ID(func, bpf_refcount_acquire) > +BTF_ID(func, bpf_list_push_front) > +BTF_ID(func, bpf_list_push_back) > +BTF_ID(func, bpf_rbtree_add) > + > +static bool is_bpf_obj_new_kfunc(u32 func_id) > +{ > + return func_id == special_kfunc_list[KF_bpf_obj_new] || > + func_id == special_kfunc_list[KF_bpf_obj_new_impl]; > +} > + > +static bool is_bpf_percpu_obj_new_kfunc(u32 func_id) > +{ > + return func_id == special_kfunc_list[KF_bpf_percpu_obj_new] || > + func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]; > +} > + > +static bool is_bpf_obj_drop_kfunc(u32 func_id) > +{ > + return func_id == special_kfunc_list[KF_bpf_obj_drop] || > + func_id == special_kfunc_list[KF_bpf_obj_drop_impl]; > +} > + > +static bool is_bpf_percpu_obj_drop_kfunc(u32 func_id) > +{ > + return func_id == special_kfunc_list[KF_bpf_percpu_obj_drop] || > + func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl]; > +} > + > +static bool is_bpf_refcount_acquire_kfunc(u32 func_id) > +{ > + return func_id == special_kfunc_list[KF_bpf_refcount_acquire] || > + func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]; > +} > + > 
+static bool is_bpf_list_push_kfunc(u32 func_id) > +{ > + return func_id == special_kfunc_list[KF_bpf_list_push_front] || > + func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || > + func_id == special_kfunc_list[KF_bpf_list_push_back] || > + func_id == special_kfunc_list[KF_bpf_list_push_back_impl]; > +} > + > +static bool is_bpf_rbtree_add_kfunc(u32 func_id) > +{ > + return func_id == special_kfunc_list[KF_bpf_rbtree_add] || > + func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]; > +} > > static bool is_task_work_add_kfunc(u32 func_id) > { > @@ -12683,10 +12769,8 @@ static bool is_task_work_add_kfunc(u32 func_id) > > static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta) > { > - if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && > - meta->arg_owning_ref) { > + if (is_bpf_refcount_acquire_kfunc(meta->func_id) && meta->arg_owning_ref) > return false; > - } > > return meta->kfunc_flags & KF_RET_NULL; > } > @@ -13074,8 +13158,7 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_ > > static bool is_bpf_list_api_kfunc(u32 btf_id) > { > - return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || > - btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] || > + return is_bpf_list_push_kfunc(btf_id) || > btf_id == special_kfunc_list[KF_bpf_list_pop_front] || > btf_id == special_kfunc_list[KF_bpf_list_pop_back] || > btf_id == special_kfunc_list[KF_bpf_list_front] || > @@ -13084,7 +13167,7 @@ static bool is_bpf_list_api_kfunc(u32 btf_id) > > static bool is_bpf_rbtree_api_kfunc(u32 btf_id) > { > - return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] || > + return is_bpf_rbtree_add_kfunc(btf_id) || > btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || > btf_id == special_kfunc_list[KF_bpf_rbtree_first] || > btf_id == special_kfunc_list[KF_bpf_rbtree_root] || > @@ -13101,8 +13184,9 @@ static bool is_bpf_iter_num_api_kfunc(u32 btf_id) > > static bool is_bpf_graph_api_kfunc(u32 
btf_id) > { > - return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) || > - btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]; > + return is_bpf_list_api_kfunc(btf_id) || > + is_bpf_rbtree_api_kfunc(btf_id) || > + is_bpf_refcount_acquire_kfunc(btf_id); > } > > static bool is_bpf_res_spin_lock_kfunc(u32 btf_id) > @@ -13135,7 +13219,7 @@ static bool kfunc_spin_allowed(u32 btf_id) > > static bool is_sync_callback_calling_kfunc(u32 btf_id) > { > - return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]; > + return is_bpf_rbtree_add_kfunc(btf_id); > } > > static bool is_async_callback_calling_kfunc(u32 btf_id) > @@ -13199,12 +13283,11 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env, > > switch (node_field_type) { > case BPF_LIST_NODE: > - ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || > - kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl]); > + ret = is_bpf_list_push_kfunc(kfunc_btf_id); > break; > case BPF_RB_NODE: > - ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || > - kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] || > + ret = (is_bpf_rbtree_add_kfunc(kfunc_btf_id) || > + kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || > kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_left] || > kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_right]); > break; > @@ -13421,11 +13504,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ > bool is_ret_buf_sz = false; > int kf_arg_type; > > - t = btf_type_skip_modifiers(btf, args[i].type, NULL); > - > - if (is_kfunc_arg_ignore(btf, &args[i])) > - continue; > - > if (is_kfunc_arg_prog_aux(btf, &args[i])) { > /* Reject repeated use bpf_prog_aux */ > if (meta->arg_prog) { > @@ -13437,6 +13515,11 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ > continue; > } > > + if (is_kfunc_arg_ignore(btf, &args[i]) || is_kfunc_arg_implicit(meta, 
i)) > + continue; > + > + t = btf_type_skip_modifiers(btf, args[i].type, NULL); > + > if (btf_type_is_scalar(t)) { > if (reg->type != SCALAR_VALUE) { > verbose(env, "R%d is not a scalar\n", regno); > @@ -13611,13 +13694,13 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ > break; > case KF_ARG_PTR_TO_ALLOC_BTF_ID: > if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) { > - if (meta->func_id != special_kfunc_list[KF_bpf_obj_drop_impl]) { > - verbose(env, "arg#%d expected for bpf_obj_drop_impl()\n", i); > + if (!is_bpf_obj_drop_kfunc(meta->func_id)) { > + verbose(env, "arg#%d expected for bpf_obj_drop()\n", i); > return -EINVAL; > } > } else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC | MEM_PERCPU)) { > - if (meta->func_id != special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) { > - verbose(env, "arg#%d expected for bpf_percpu_obj_drop_impl()\n", i); > + if (!is_bpf_percpu_obj_drop_kfunc(meta->func_id)) { > + verbose(env, "arg#%d expected for bpf_percpu_obj_drop()\n", i); > return -EINVAL; > } > } else { > @@ -13743,7 +13826,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ > return ret; > break; > case KF_ARG_PTR_TO_RB_NODE: > - if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { > + if (is_bpf_rbtree_add_kfunc(meta->func_id)) { > if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { > verbose(env, "arg#%d expected pointer to allocated object\n", i); > return -EINVAL; > @@ -13980,13 +14063,12 @@ static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_ca > if (meta->btf != btf_vmlinux) > return 0; > > - if (meta->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || > - meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { > + if (is_bpf_obj_new_kfunc(meta->func_id) || is_bpf_percpu_obj_new_kfunc(meta->func_id)) { > struct btf_struct_meta *struct_meta; > struct btf *ret_btf; > u32 ret_btf_id; > > - if (meta->func_id == special_kfunc_list[KF_bpf_obj_new_impl] && 
!bpf_global_ma_set) > + if (is_bpf_obj_new_kfunc(meta->func_id) && !bpf_global_ma_set) > return -ENOMEM; > > if (((u64)(u32)meta->arg_constant.value) != meta->arg_constant.value) { > @@ -14009,7 +14091,7 @@ static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_ca > return -EINVAL; > } > > - if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { > + if (is_bpf_percpu_obj_new_kfunc(meta->func_id)) { > if (ret_t->size > BPF_GLOBAL_PERCPU_MA_MAX_SIZE) { > verbose(env, "bpf_percpu_obj_new type size (%d) is greater than %d\n", > ret_t->size, BPF_GLOBAL_PERCPU_MA_MAX_SIZE); > @@ -14039,7 +14121,7 @@ static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_ca > } > > struct_meta = btf_find_struct_meta(ret_btf, ret_btf_id); > - if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { > + if (is_bpf_percpu_obj_new_kfunc(meta->func_id)) { > if (!__btf_type_is_scalar_struct(env, ret_btf, ret_t, 0)) { > verbose(env, "bpf_percpu_obj_new type ID argument must be of a struct of scalars\n"); > return -EINVAL; > @@ -14055,12 +14137,12 @@ static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_ca > regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; > regs[BPF_REG_0].btf = ret_btf; > regs[BPF_REG_0].btf_id = ret_btf_id; > - if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) > + if (is_bpf_percpu_obj_new_kfunc(meta->func_id)) > regs[BPF_REG_0].type |= MEM_PERCPU; > > insn_aux->obj_new_size = ret_t->size; > insn_aux->kptr_struct_meta = struct_meta; > - } else if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { > + } else if (is_bpf_refcount_acquire_kfunc(meta->func_id)) { > mark_reg_known_zero(env, regs, BPF_REG_0); > regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; > regs[BPF_REG_0].btf = meta->arg_btf; > @@ -14226,7 +14308,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, > if (err < 0) > return err; > > - if 
(meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { > + if (is_bpf_rbtree_add_kfunc(meta.func_id)) { > err = push_callback_call(env, insn, insn_idx, meta.subprogno, > set_rbtree_add_callback_state); > if (err) { > @@ -14340,9 +14422,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, > return err; > } > > - if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || > - meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || > - meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { > + if (is_bpf_list_push_kfunc(meta.func_id) || is_bpf_rbtree_add_kfunc(meta.func_id)) { > release_ref_obj_id = regs[BPF_REG_2].ref_obj_id; > insn_aux->insert_off = regs[BPF_REG_2].var_off.value; > insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id); > @@ -14390,11 +14470,10 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, > t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL); > > if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) { > - /* Only exception is bpf_obj_new_impl */ > if (meta.btf != btf_vmlinux || > - (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] && > - meta.func_id != special_kfunc_list[KF_bpf_percpu_obj_new_impl] && > - meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) { > + (!is_bpf_obj_new_kfunc(meta.func_id) && > + !is_bpf_percpu_obj_new_kfunc(meta.func_id) && > + !is_bpf_refcount_acquire_kfunc(meta.func_id))) { > verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n"); > return -EINVAL; > } > @@ -14505,8 +14584,8 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, > regs[BPF_REG_0].id = ++env->id_gen; > } else if (btf_type_is_void(t)) { > if (meta.btf == btf_vmlinux) { > - if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || > - meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) { > + if 
(is_bpf_obj_drop_kfunc(meta.func_id) || > + is_bpf_percpu_obj_drop_kfunc(meta.func_id)) { > insn_aux->kptr_struct_meta = > btf_find_struct_meta(meta.arg_btf, > meta.arg_btf_id); > @@ -23312,13 +23391,12 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, > if (!bpf_jit_supports_far_kfunc_call()) > insn->imm = BPF_CALL_IMM(desc->addr); > > - if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || > - desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { > + if (is_bpf_obj_new_kfunc(desc->func_id) || is_bpf_percpu_obj_new_kfunc(desc->func_id)) { > struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; > struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; > u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size; > > - if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) { > + if (is_bpf_percpu_obj_new_kfunc(desc->func_id) && kptr_struct_meta) { > verifier_bug(env, "NULL kptr_struct_meta expected at insn_idx %d", > insn_idx); > return -EFAULT; > @@ -23329,20 +23407,19 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, > insn_buf[2] = addr[1]; > insn_buf[3] = *insn; > *cnt = 4; > - } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || > - desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] || > - desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { > + } else if (is_bpf_obj_drop_kfunc(desc->func_id) || > + is_bpf_percpu_obj_drop_kfunc(desc->func_id) || > + is_bpf_refcount_acquire_kfunc(desc->func_id)) { > struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; > struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; > > - if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) { > + if (is_bpf_percpu_obj_drop_kfunc(desc->func_id) && 
kptr_struct_meta) { > verifier_bug(env, "NULL kptr_struct_meta expected at insn_idx %d", > insn_idx); > return -EFAULT; > } > > - if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && > - !kptr_struct_meta) { > + if (is_bpf_refcount_acquire_kfunc(desc->func_id) && !kptr_struct_meta) { > verifier_bug(env, "kptr_struct_meta expected at insn_idx %d", > insn_idx); > return -EFAULT; > @@ -23352,15 +23429,14 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, > insn_buf[1] = addr[1]; > insn_buf[2] = *insn; > *cnt = 3; > - } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || > - desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || > - desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { > + } else if (is_bpf_list_push_kfunc(desc->func_id) || > + is_bpf_rbtree_add_kfunc(desc->func_id)) { > struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; > int struct_meta_reg = BPF_REG_3; > int node_offset_reg = BPF_REG_4; > > /* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */ > - if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { > + if (is_bpf_rbtree_add_kfunc(desc->func_id)) { > struct_meta_reg = BPF_REG_4; > node_offset_reg = BPF_REG_5; > } > diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c > index 5208f650080f..f8a91fa7584f 100644 > --- a/tools/bpf/resolve_btfids/main.c > +++ b/tools/bpf/resolve_btfids/main.c > @@ -1065,6 +1065,7 @@ static bool is_kf_implicit_arg(const struct btf *btf, const struct btf_param *p) > { > static const char *const kf_implicit_arg_types[] = { > "bpf_prog_aux", > + "btf_struct_meta", > }; > const struct btf_type *t; > const char *name; > diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c > index f2b8eb2ff76f..81813c724fa9 100644 > --- 
a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c > +++ b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c > @@ -110,7 +110,7 @@ int BPF_PROG(test_array_map_3) > } > > SEC("?fentry.s/bpf_fentry_test1") > -__failure __msg("arg#0 expected for bpf_percpu_obj_drop_impl()") > +__failure __msg("arg#0 expected for bpf_percpu_obj_drop()") > int BPF_PROG(test_array_map_4) > { > struct val_t __percpu_kptr *p; > @@ -124,7 +124,7 @@ int BPF_PROG(test_array_map_4) > } > > SEC("?fentry.s/bpf_fentry_test1") > -__failure __msg("arg#0 expected for bpf_obj_drop_impl()") > +__failure __msg("arg#0 expected for bpf_obj_drop()") > int BPF_PROG(test_array_map_5) > { > struct val_t *p; > -- > 2.53.0 ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-20 15:49 ` Mykyta Yatsenko @ 2026-03-27 0:16 ` Ihor Solodrai 2026-03-27 19:19 ` Mykyta Yatsenko 0 siblings, 1 reply; 21+ messages in thread From: Ihor Solodrai @ 2026-03-27 0:16 UTC (permalink / raw) To: Mykyta Yatsenko, Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman Cc: bpf, kernel-team On 3/20/26 8:49 AM, Mykyta Yatsenko wrote: > Ihor Solodrai <ihor.solodrai@linux.dev> writes: > >> [...] >> +/* >> + * A kfunc with KF_IMPLICIT_ARGS has two prototypes in BTF: >> + * - the _impl prototype with full arg list (this is meta->func_proto) >> + * - the BPF API prototype w/o implicit args (func->type in BTF) >> + * To determine whether an argument is implicit, we compare its position >> + * against the number of arguments of both prototypes. >> + */ >> +static bool is_kfunc_arg_implicit(const struct bpf_kfunc_call_arg_meta *meta, u32 arg_idx) >> +{ >> + const struct btf_type *func, *func_proto; >> + u32 argn, full_argn; >> + >> + if (!(meta->kfunc_flags & KF_IMPLICIT_ARGS)) >> + return false; >> + >> + full_argn = btf_type_vlen(meta->func_proto); >> + >> + func = btf_type_by_id(meta->btf, meta->func_id); >> + func_proto = btf_type_by_id(meta->btf, func->type); >> + argn = btf_type_vlen(func_proto); >> + >> + return argn <= arg_idx && arg_idx < full_argn; > The `arg_idx < full_argn` condition is not necessary, is it? > arg_idx is always less than full_argn because full_argn is the number of > arguments in the _impl variant of the function, which is supposed to be > greater than non-_impl variant that arg_idx tracks. > arg_idx >= full_argn is an invariant violation, not the implicit > argument condition, if I understand this right. Hi Mykyta, thanks for the review. Yes, I think you're right. I don't think this helper should error out on arg_idx >= full_argn, since that would be an error caught in check_kfunc_call(). 
And returning an error will make the helper a little more complicated. At the same time, it seems logical to me that the answer to "is arg 6 implicit for kfunc with 5 args in _impl proto?" should be false. Do you have a specific suggestion? > > The rest of the refactoring looks good to me. >> +} >> + >> [...] >> -- >> 2.53.0 ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-27 0:16 ` Ihor Solodrai @ 2026-03-27 19:19 ` Mykyta Yatsenko 0 siblings, 0 replies; 21+ messages in thread From: Mykyta Yatsenko @ 2026-03-27 19:19 UTC (permalink / raw) To: Ihor Solodrai, Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman Cc: bpf, kernel-team Ihor Solodrai <ihor.solodrai@linux.dev> writes: > On 3/20/26 8:49 AM, Mykyta Yatsenko wrote: >> Ihor Solodrai <ihor.solodrai@linux.dev> writes: >> >>> [...] >>> +/* >>> + * A kfunc with KF_IMPLICIT_ARGS has two prototypes in BTF: >>> + * - the _impl prototype with full arg list (this is meta->func_proto) >>> + * - the BPF API prototype w/o implicit args (func->type in BTF) >>> + * To determine whether an argument is implicit, we compare its position >>> + * against the number of arguments of both prototypes. >>> + */ >>> +static bool is_kfunc_arg_implicit(const struct bpf_kfunc_call_arg_meta *meta, u32 arg_idx) >>> +{ >>> + const struct btf_type *func, *func_proto; >>> + u32 argn, full_argn; >>> + >>> + if (!(meta->kfunc_flags & KF_IMPLICIT_ARGS)) >>> + return false; >>> + >>> + full_argn = btf_type_vlen(meta->func_proto); >>> + >>> + func = btf_type_by_id(meta->btf, meta->func_id); >>> + func_proto = btf_type_by_id(meta->btf, func->type); >>> + argn = btf_type_vlen(func_proto); >>> + >>> + return argn <= arg_idx && arg_idx < full_argn; >> The `arg_idx < full_argn` condition is not necessary, is it? >> arg_idx is always less than full_argn because full_argn is the number of >> arguments in the _impl variant of the function, which is supposed to be >> greater than non-_impl variant that arg_idx tracks. >> arg_idx >= full_argn is an invariant violation, not the implicit >> argument condition, if I understand this right. > > Hi Mykyta, thanks for the review. > > Yes, I think you're right. 
> > I don't think this helper should error out on arg_idx >= full_argn, > since that would be an error caught in check_kfunc_call(). And returning > an error will make the helper a little more complicated. > > At the same time, it seems logical to me that the answer to > "is arg 6 implicit for kfunc with 5 args in _impl proto?" > should be false. > > Do you have a specific suggestion? > Sorry for the delayed reply, just noticed it. I would drop the check, as you mentioned it should be caught earlier, so it's there just as a defensive programming artifact. Argument index is never greater than the number of arguments in the function. >> >> The rest of the refactoring looks good to me. >>> +} >>> + >>> [...] >>> -- >>> 2.53.0 ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-18 23:42 [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS Ihor Solodrai ` (2 preceding siblings ...) 2026-03-20 15:49 ` Mykyta Yatsenko @ 2026-03-21 20:27 ` Alexei Starovoitov 2026-03-23 19:58 ` Ihor Solodrai 3 siblings, 1 reply; 21+ messages in thread From: Alexei Starovoitov @ 2026-03-21 20:27 UTC (permalink / raw) To: Ihor Solodrai Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, Kernel Team On Wed, Mar 18, 2026 at 4:42 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: > > const struct btf *btf, > @@ -12594,6 +12620,14 @@ enum special_kfunc_type { > KF_bpf_session_is_return, > KF_bpf_stream_vprintk, > KF_bpf_stream_print_stack, > + KF_bpf_obj_new, > + KF_bpf_percpu_obj_new, > + KF_bpf_obj_drop, > + KF_bpf_percpu_obj_drop, > + KF_bpf_refcount_acquire, > + KF_bpf_list_push_front, > + KF_bpf_list_push_back, > + KF_bpf_rbtree_add, > }; it's not an uapi. no need to add them to the end. Pls add them next to _impl flavors. The whole thing needs a full refactor in a long term. special_kfunc* thingy got out of hand. pw-bot: cr ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-21 20:27 ` Alexei Starovoitov @ 2026-03-23 19:58 ` Ihor Solodrai 2026-03-24 17:22 ` Alexei Starovoitov 0 siblings, 1 reply; 21+ messages in thread From: Ihor Solodrai @ 2026-03-23 19:58 UTC (permalink / raw) To: Alexei Starovoitov Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, Kernel Team On 3/21/26 1:27 PM, Alexei Starovoitov wrote: > On Wed, Mar 18, 2026 at 4:42 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: >> >> const struct btf *btf, >> @@ -12594,6 +12620,14 @@ enum special_kfunc_type { >> KF_bpf_session_is_return, >> KF_bpf_stream_vprintk, >> KF_bpf_stream_print_stack, >> + KF_bpf_obj_new, >> + KF_bpf_percpu_obj_new, >> + KF_bpf_obj_drop, >> + KF_bpf_percpu_obj_drop, >> + KF_bpf_refcount_acquire, >> + KF_bpf_list_push_front, >> + KF_bpf_list_push_back, >> + KF_bpf_rbtree_add, >> }; > > it's not an uapi. no need to add them to the end. > Pls add them next to _impl flavors. > > The whole thing needs a full refactor in a long term. > special_kfunc* thingy got out of hand. I thought about this a little and tried a couple of ideas. Here is one that looks a bit better to me than KF_* enum: BTF_ID_LIST_NAMED() and BTF_ID_NAMED(). I vibe-coded a PoC [1]. Essentially, we add a set of macros to generate source-level stable symbol for each BTF_ID_LIST entry. Then it is possible to refer to the associated value directly by name. 
It looks like this: +#define BTF_ID_LIST_NAMED(name) \ + __BTF_ID_LIST(name, local) \ + extern u32 name[]; + +#define BTF_ID_NAMED(list, prefix, name) \ + __BTF_ID(__BTF_ID__##prefix##__##name##__##list, "") \ + extern u32 __btf_id_##list##__##prefix##__##name \ + asm("__BTF_ID__" #prefix "__" #name "__" #list); + +#define btf_id_named(list, prefix, name) \ + (__btf_id_##list##__##prefix##__##name) + And then: +BTF_ID_LIST_NAMED(special_kfunc_list) +BTF_ID_NAMED(special_kfunc_list, func, bpf_obj_new_impl) +BTF_ID_NAMED(special_kfunc_list, func, bpf_obj_drop_impl) [...] +#define special_kfunc_id(name) btf_id_named(special_kfunc_list, func, name) [...] static bool is_bpf_obj_new_kfunc(u32 func_id) { - return func_id == special_kfunc_list[KF_bpf_obj_new] || - func_id == special_kfunc_list[KF_bpf_obj_new_impl]; + return func_id == special_kfunc_id(bpf_obj_new) || + func_id == special_kfunc_id(bpf_obj_new_impl); } The benefit of this is that we can delete enum special_kfunc_type, and there is only one list where a special kfunc has to be defined (and this may work for other similar use cases). The downside is additional BTF_ID macrology, and potentially changes in resolve_btfids. What do people think, is this worth pursuing? [1] https://github.com/theihor/bpf/commit/be38dde0a027d7ab84d8d20ac266251fb938ceb6 > > pw-bot: cr ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-23 19:58 ` Ihor Solodrai @ 2026-03-24 17:22 ` Alexei Starovoitov 2026-03-26 19:13 ` Ihor Solodrai 0 siblings, 1 reply; 21+ messages in thread From: Alexei Starovoitov @ 2026-03-24 17:22 UTC (permalink / raw) To: Ihor Solodrai Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, Kernel Team On Mon, Mar 23, 2026 at 12:58 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: > > On 3/21/26 1:27 PM, Alexei Starovoitov wrote: > > On Wed, Mar 18, 2026 at 4:42 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: > >> > >> const struct btf *btf, > >> @@ -12594,6 +12620,14 @@ enum special_kfunc_type { > >> KF_bpf_session_is_return, > >> KF_bpf_stream_vprintk, > >> KF_bpf_stream_print_stack, > >> + KF_bpf_obj_new, > >> + KF_bpf_percpu_obj_new, > >> + KF_bpf_obj_drop, > >> + KF_bpf_percpu_obj_drop, > >> + KF_bpf_refcount_acquire, > >> + KF_bpf_list_push_front, > >> + KF_bpf_list_push_back, > >> + KF_bpf_rbtree_add, > >> }; > > > > it's not an uapi. no need to add them to the end. > > Pls add them next to _impl flavors. > > > > The whole thing needs a full refactor in a long term. > > special_kfunc* thingy got out of hand. > > I thought about this a little and tried a couple of ideas. Here is > one that looks a bit better to me than KF_* enum: BTF_ID_LIST_NAMED() > and BTF_ID_NAMED(). > > I vibe-coded a PoC [1]. Essentially, we add a set of macros to > generate source-level stable symbol for each BTF_ID_LIST entry. Then > it is possible to refer to the associated value directly by name. > > It looks like this: > > +#define BTF_ID_LIST_NAMED(name) \ > + __BTF_ID_LIST(name, local) \ > + extern u32 name[]; > + > +#define BTF_ID_NAMED(list, prefix, name) \ > + __BTF_ID(__BTF_ID__##prefix##__##name##__##list, "") \ > + extern u32 __btf_id_##list##__##prefix##__##name \ > + asm("__BTF_ID__" #prefix "__" #name "__" #list); ... 
> [1] https://github.com/theihor/bpf/commit/be38dde0a027d7ab84d8d20ac266251fb938ceb6 too much claude in there. It talks about everything except important bits. How does the above 'asm' magic work? Just to clarify. This potential cleanup is not necessary to land this set. ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-24 17:22 ` Alexei Starovoitov @ 2026-03-26 19:13 ` Ihor Solodrai 2026-03-27 20:48 ` Alexei Starovoitov 0 siblings, 1 reply; 21+ messages in thread From: Ihor Solodrai @ 2026-03-26 19:13 UTC (permalink / raw) To: Alexei Starovoitov Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, Kernel Team On 3/24/26 10:22 AM, Alexei Starovoitov wrote: > On Mon, Mar 23, 2026 at 12:58 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: >> >> On 3/21/26 1:27 PM, Alexei Starovoitov wrote: >>> On Wed, Mar 18, 2026 at 4:42 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: >>>> >>>> const struct btf *btf, >>>> @@ -12594,6 +12620,14 @@ enum special_kfunc_type { >>>> KF_bpf_session_is_return, >>>> KF_bpf_stream_vprintk, >>>> KF_bpf_stream_print_stack, >>>> + KF_bpf_obj_new, >>>> + KF_bpf_percpu_obj_new, >>>> + KF_bpf_obj_drop, >>>> + KF_bpf_percpu_obj_drop, >>>> + KF_bpf_refcount_acquire, >>>> + KF_bpf_list_push_front, >>>> + KF_bpf_list_push_back, >>>> + KF_bpf_rbtree_add, >>>> }; >>> >>> it's not an uapi. no need to add them to the end. >>> Pls add them next to _impl flavors. >>> >>> The whole thing needs a full refactor in a long term. >>> special_kfunc* thingy got out of hand. >> >> I thought about this a little and tried a couple of ideas. Here is >> one that looks a bit better to me than KF_* enum: BTF_ID_LIST_NAMED() >> and BTF_ID_NAMED(). >> >> I vibe-coded a PoC [1]. Essentially, we add a set of macros to >> generate source-level stable symbol for each BTF_ID_LIST entry. Then >> it is possible to refer to the associated value directly by name. 
>> >> It looks like this: >> >> +#define BTF_ID_LIST_NAMED(name) \ >> + __BTF_ID_LIST(name, local) \ >> + extern u32 name[]; >> + >> +#define BTF_ID_NAMED(list, prefix, name) \ >> + __BTF_ID(__BTF_ID__##prefix##__##name##__##list, "") \ >> + extern u32 __btf_id_##list##__##prefix##__##name \ >> + asm("__BTF_ID__" #prefix "__" #name "__" #list); > > ... > >> [1] https://github.com/theihor/bpf/commit/be38dde0a027d7ab84d8d20ac266251fb938ceb6 > > too much claude in there. It talks about everything except > important bits. Yeah, sorry this is a classic case of AI over-verbosity. > How does the above 'asm' magic work? As it turns out, asm() magic isn't even necessary. Currently BTF_ID_LIST with subsequent BTF_ID create symbols for the list and for each entry like follows: __BTF_ID__##prefix##__##name##__<counter><line> For example: __BTF_ID__func__bpf_obj_new__8524584 These symbols refer to a piece of memory storing BTF ID of course, which is how resolve_btfids can patch them and kernel use them. So the idea I vibe-coded is simple: let's directly access these symbols at the source level instead of maintaining enums. The obstacle to this was `<counter><line>` suffix, which exists to avoid conflicts between BTF_ID_* containers in the same source file. But the suffix also prevents us from using the symbol directly in C, since you don't know the <counter><line>. 
Well since every BTF_ID_LIST and co has a unique name (at the source file level), we can use it as a disambiguator suffix instead of the <counter><line>, getting something like: __BTF_ID__func__bpf_obj_new_impl__special_kfunc_list Having that we can add extern declarations (so that C compiler is aware of the symbols) and a helper macro to reconstruct the symbol and read its value: #define BTF_ID_NAMED(list, prefix, name) \ __BTF_ID(__BTF_ID__##prefix##__##name##__##list) \ extern u32 __BTF_ID__##prefix##__##name##__##list; #define btf_id_named(list, prefix, name) \ (__BTF_ID__##prefix##__##name##__##list) btf_id_named(special_kfunc_list, func, bpf_obj_new_impl) I don't know why AI decided it's a good idea to add one more level of aliasing here with asm(), but it is not necessary for this macrology to work. Additional caveat is that resolve_btfids needs to be taught about new suffixes, but that's relatively trivial. The above works, and the question now is if we like this mechanism more than current setup with explicit enums. > > Just to clarify. This potential cleanup is not necessary to land this set. Ok, I'll respin, thanks. ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-26 19:13 ` Ihor Solodrai @ 2026-03-27 20:48 ` Alexei Starovoitov 2026-03-27 20:55 ` Ihor Solodrai 0 siblings, 1 reply; 21+ messages in thread From: Alexei Starovoitov @ 2026-03-27 20:48 UTC (permalink / raw) To: Ihor Solodrai Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, Kernel Team On Thu, Mar 26, 2026 at 12:13 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: > > > Currently BTF_ID_LIST with subsequent BTF_ID create symbols for the > list and for each entry like follows: > > __BTF_ID__##prefix##__##name##__<counter><line> > > For example: > > __BTF_ID__func__bpf_obj_new__8524584 > > These symbols refer to a piece of memory storing BTF ID of course, > which is how resolve_btfids can patch them and kernel use them. > > So the idea I vibe-coded is simple: let's directly access these > symbols at the source level instead of maintaining an enums. > > The obstacle to this was `<counter><line>` suffix, which exists to > avoid conflicts between BTF_ID_* containers in the same source > file. But the suffix also prevents us from using the symbol directly > in C, since you don't know the <counter><line>. 
> > Well since every BTF_ID_LIST and co has a unique name (at the source > file level), we can use it as a disambiguator suffix instead of the > <counter><line>, getting something like: > > __BTF_ID__func__bpf_obj_new_impl__special_kfunc_list > > Having that we can add extern declarations (so that C compiler is > aware of the symbols) and a helper macro to reconstruct the symbol and > read its value: > > #define BTF_ID_NAMED(list, prefix, name) \ > __BTF_ID(__BTF_ID__##prefix##__##name##__##list) \ > extern u32 __BTF_ID__##prefix##__##name##__##list; > > #define btf_id_named(list, prefix, name) \ > (__BTF_ID__##prefix##__##name##__##list) > > btf_id_named(special_kfunc_list, func, bpf_obj_new_impl) > > I don't know why AI decided it's a good idea to add one more level of > aliasing here with asm(), but it is not necessary for this macrology > to work. > > Additional caveat is that resolve_btfids needs to be taught about new > suffixes, but that's relatively trivial. > > The above works, and the question now is if we like this mechanism > more than current setup with explicit enums. Got it. Thanks for the explanation. Your approach does sound a lot better than explicit enums. Make a proper patch out of it. If we do this can we remove <counter><line> approach everywhere while at it and only use named ? ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-27 20:48 ` Alexei Starovoitov @ 2026-03-27 20:55 ` Ihor Solodrai 2026-03-27 21:00 ` Alexei Starovoitov 0 siblings, 1 reply; 21+ messages in thread From: Ihor Solodrai @ 2026-03-27 20:55 UTC (permalink / raw) To: Alexei Starovoitov Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, Kernel Team On 3/27/26 1:48 PM, Alexei Starovoitov wrote: > On Thu, Mar 26, 2026 at 12:13 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: >> >> [...] >> >> The above works, and the question now is if we like this mechanism >> more than current setup with explicit enums. > > Got it. Thanks for the explanation. > Your approach does sound a lot better than explicit enums. > Make a proper patch out of it. > > If we do this can we remove <counter><line> approach everywhere > while at it and only use named ? This crossed my mind too. I'll try to *replace* the suffixes and if it works fine submit that. One inconvenience is that with named suffixes BTF_ID() macro will have to accept an additional arg (the list name), but I think that's ok. We already have to pass struct/func everywhere too. ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-27 20:55 ` Ihor Solodrai @ 2026-03-27 21:00 ` Alexei Starovoitov 2026-03-27 21:08 ` Ihor Solodrai 0 siblings, 1 reply; 21+ messages in thread From: Alexei Starovoitov @ 2026-03-27 21:00 UTC (permalink / raw) To: Ihor Solodrai Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, Kernel Team On Fri, Mar 27, 2026 at 1:55 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: > > On 3/27/26 1:48 PM, Alexei Starovoitov wrote: > > On Thu, Mar 26, 2026 at 12:13 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: > >> > >> [...] > >> > >> The above works, and the question now is if we like this mechanism > >> more than current setup with explicit enums. > > > > Got it. Thanks for the explanation. > > Your approach does sound a lot better than explicit enums. > > Make a proper patch out of it. > > > > If we do this can we remove <counter><line> approach everywhere > > while at it and only use named ? > > This crossed my mind too. I'll try to *replace* the suffixes and if > it works fine submit that. > > One inconvenience is that with named suffixes BTF_ID() macro will > have to accept an additional arg (the list name), but I think > that's ok. We already have to pass struct/func everywhere too. and that name has to be unique.. I think it's fine. I'd do such change, and reuse all of BTF_ID macros. So that only BTF_SET_START line will differ. The overall diff stat shouldn't be big ? ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-27 21:00 ` Alexei Starovoitov @ 2026-03-27 21:08 ` Ihor Solodrai 2026-03-27 21:47 ` Alexei Starovoitov 0 siblings, 1 reply; 21+ messages in thread From: Ihor Solodrai @ 2026-03-27 21:08 UTC (permalink / raw) To: Alexei Starovoitov Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, Kernel Team On 3/27/26 2:00 PM, Alexei Starovoitov wrote: > On Fri, Mar 27, 2026 at 1:55 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: >> >> On 3/27/26 1:48 PM, Alexei Starovoitov wrote: >>> On Thu, Mar 26, 2026 at 12:13 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: >>>> >>>> [...] >>>> >>>> The above works, and the question now is if we like this mechanism >>>> more than current setup with explicit enums. >>> >>> Got it. Thanks for the explanation. >>> Your approach does sound a lot better than explicit enums. >>> Make a proper patch out of it. >>> >>> If we do this can we remove <counter><line> approach everywhere >>> while at it and only use named ? >> >> This crossed my mind too. I'll try to *replace* the suffixes and if >> it works fine submit that. >> >> One inconvenience is that with named suffixes BTF_ID() macro will >> have to accept an additional arg (the list name), but I think >> that's ok. We already have to pass struct/func everywhere too. > > and that name has to be unique.. I think it's fine. > I'd do such change, and reuse all of BTF_ID macros. > So that only BTF_SET_START line will differ. > > The overall diff stat shouldn't be big ? Depends on whether we are refactoring BTF_ID, or just adding new BTF_ID_NAMED set of macros. 
Refactoring would touch every single BTF_ID usage, and that's pretty invasive: $ grep -r 'BTF_ID(' --include="*.[ch]" | wc -l 307 $ grep -rl 'BTF_ID(' --include="*.[ch]" fs/bpf_fs_kfuncs.c include/linux/btf_ids.h kernel/bpf/bpf_lsm.c kernel/bpf/bpf_struct_ops.c kernel/bpf/btf.c kernel/bpf/cpumask.c kernel/bpf/crypto.c kernel/bpf/helpers.c kernel/bpf/verifier.c kernel/trace/bpf_trace.c net/bpf/test_run.c net/core/filter.c net/core/xdp.c net/netfilter/nf_conntrack_bpf.c net/sched/bpf_qdisc.c tools/include/linux/btf_ids.h tools/testing/selftests/bpf/prog_tests/resolve_btfids.c tools/testing/selftests/bpf/test_kmods/bpf_testmod.c ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-27 21:08 ` Ihor Solodrai @ 2026-03-27 21:47 ` Alexei Starovoitov 2026-03-27 22:06 ` Ihor Solodrai 0 siblings, 1 reply; 21+ messages in thread From: Alexei Starovoitov @ 2026-03-27 21:47 UTC (permalink / raw) To: Ihor Solodrai Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, Kernel Team On Fri, Mar 27, 2026 at 2:08 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: > > On 3/27/26 2:00 PM, Alexei Starovoitov wrote: > > On Fri, Mar 27, 2026 at 1:55 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: > >> > >> On 3/27/26 1:48 PM, Alexei Starovoitov wrote: > >>> On Thu, Mar 26, 2026 at 12:13 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: > >>>> > >>>> [...] > >>>> > >>>> The above works, and the question now is if we like this mechanism > >>>> more than current setup with explicit enums. > >>> > >>> Got it. Thanks for the explanation. > >>> Your approach does sound a lot better than explicit enums. > >>> Make a proper patch out of it. > >>> > >>> If we do this can we remove <counter><line> approach everywhere > >>> while at it and only use named ? > >> > >> This crossed my mind too. I'll try to *replace* the suffixes and if > >> it works fine submit that. > >> > >> One inconvenience is that with named suffixes BTF_ID() macro will > >> have to accept an additional arg (the list name), but I think > >> that's ok. We already have to pass struct/func everywhere too. > > > > and that name has to be unique.. I think it's fine. > > I'd do such change, and reuse all of BTF_ID macros. > > So that only BTF_SET_START line will differ. > > > > The overall diff stat shouldn't be big ? > > Depends on whether we are refactoring BTF_ID, or just adding new > BTF_ID_NAMED set of macros. I meant that BTF_ID_NAMED(special_kfunc_list would somehow remember that it belongs to BTF_ID_LIST_NAMED that started this "scrope" few lines above an actual BTF_ID(..) ? 
or for BTF_ID(func, bpf_lsm_bpf_prog_free) we consider 'func' to be that 'scope' ? we don't have uniqueness across files: BTF_SET_START(untrusted_lsm_hooks) BTF_ID(func, bpf_lsm_bpf_map_free) BTF_ID(func, bpf_lsm_bpf_prog_free) .. BTF_SET_START(sleepable_lsm_hooks) ... BTF_ID(func, bpf_lsm_bpf_map_free) ... BTF_ID(func, bpf_lsm_bpf_prog_free) but does it matter? This new BTF_ID() will define multiple extern u32 with the same name. Make them weak since they point to the same id ? ^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS 2026-03-27 21:47 ` Alexei Starovoitov @ 2026-03-27 22:06 ` Ihor Solodrai 0 siblings, 0 replies; 21+ messages in thread From: Ihor Solodrai @ 2026-03-27 22:06 UTC (permalink / raw) To: Alexei Starovoitov Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, Eduard Zingerman, bpf, Kernel Team On 3/27/26 2:47 PM, Alexei Starovoitov wrote: > On Fri, Mar 27, 2026 at 2:08 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: >> >> On 3/27/26 2:00 PM, Alexei Starovoitov wrote: >>> On Fri, Mar 27, 2026 at 1:55 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: >>>> >>>> On 3/27/26 1:48 PM, Alexei Starovoitov wrote: >>>>> On Thu, Mar 26, 2026 at 12:13 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote: >>>>>> >>>>>> [...] >>>>>> >>>>>> The above works, and the question now is if we like this mechanism >>>>>> more than current setup with explicit enums. >>>>> >>>>> Got it. Thanks for the explanation. >>>>> Your approach does sound a lot better than explicit enums. >>>>> Make a proper patch out of it. >>>>> >>>>> If we do this can we remove <counter><line> approach everywhere >>>>> while at it and only use named ? >>>> >>>> This crossed my mind too. I'll try to *replace* the suffixes and if >>>> it works fine submit that. >>>> >>>> One inconvenience is that with named suffixes BTF_ID() macro will >>>> have to accept an additional arg (the list name), but I think >>>> that's ok. We already have to pass struct/func everywhere too. >>> >>> and that name has to be unique.. I think it's fine. >>> I'd do such change, and reuse all of BTF_ID macros. >>> So that only BTF_SET_START line will differ. >>> >>> The overall diff stat shouldn't be big ? >> >> Depends on whether we are refactoring BTF_ID, or just adding new >> BTF_ID_NAMED set of macros. 
> > I meant that BTF_ID_NAMED(special_kfunc_list > would somehow remember that it belongs to BTF_ID_LIST_NAMED > that started this "scrope" few lines above an actual BTF_ID(..) > ? > > or for BTF_ID(func, bpf_lsm_bpf_prog_free) > we consider 'func' to be that 'scope' ? > > we don't have uniqueness across files: > BTF_SET_START(untrusted_lsm_hooks) > BTF_ID(func, bpf_lsm_bpf_map_free) > BTF_ID(func, bpf_lsm_bpf_prog_free) > .. > > BTF_SET_START(sleepable_lsm_hooks) > ... > BTF_ID(func, bpf_lsm_bpf_map_free) > ... > BTF_ID(func, bpf_lsm_bpf_prog_free) > > but does it matter? > This new BTF_ID() will define multiple extern u32 with the same name. > Make them weak since they point to the same id ? Let me play with this, maybe I (or AI) can come up with something like you're describing. I'll aim to submit a patch next week. ^ permalink raw reply [flat|nested] 21+ messages in thread
end of thread, other threads:[~2026-03-27 22:06 UTC | newest] Thread overview: 21+ messages (download: mbox.gz follow: Atom feed -- links below jump to the message on this page -- 2026-03-18 23:42 [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS Ihor Solodrai 2026-03-18 23:42 ` [PATCH bpf-next v3 2/2] selftests/bpf: Update kfuncs using btf_struct_meta to new variants Ihor Solodrai 2026-03-19 12:30 ` Jiri Olsa 2026-03-19 20:43 ` Ihor Solodrai 2026-03-20 11:06 ` Jiri Olsa 2026-03-20 14:50 ` Mykyta Yatsenko 2026-03-19 12:25 ` [PATCH bpf-next v3 1/2] bpf: Support struct btf_struct_meta via KF_IMPLICIT_ARGS Jiri Olsa 2026-03-19 20:37 ` Ihor Solodrai 2026-03-20 15:49 ` Mykyta Yatsenko 2026-03-27 0:16 ` Ihor Solodrai 2026-03-27 19:19 ` Mykyta Yatsenko 2026-03-21 20:27 ` Alexei Starovoitov 2026-03-23 19:58 ` Ihor Solodrai 2026-03-24 17:22 ` Alexei Starovoitov 2026-03-26 19:13 ` Ihor Solodrai 2026-03-27 20:48 ` Alexei Starovoitov 2026-03-27 20:55 ` Ihor Solodrai 2026-03-27 21:00 ` Alexei Starovoitov 2026-03-27 21:08 ` Ihor Solodrai 2026-03-27 21:47 ` Alexei Starovoitov 2026-03-27 22:06 ` Ihor Solodrai
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox