* [PATCH bpf-next v4 01/12] bpf: Simplify mark_stack_slot_obj_read() and callers
2026-05-06 14:26 [PATCH bpf-next v4 00/12] Refactor verifier object relationship tracking Amery Hung
@ 2026-05-06 14:26 ` Amery Hung
2026-05-11 17:17 ` Eduard Zingerman
2026-05-06 14:26 ` [PATCH bpf-next v4 02/12] bpf: Unify dynptr handling in the verifier Amery Hung
` (10 subsequent siblings)
11 siblings, 1 reply; 22+ messages in thread
From: Amery Hung @ 2026-05-06 14:26 UTC (permalink / raw)
To: bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team
Directly call mark_stack_slot_obj_read() from the functions processing
iter, dynptr and irq_flag. To prepare for unifying dynptr handling,
dynptr_get_spi() will be moved out of mark_dynptr_read(). As
mark_dynptr_read() would then join mark_iter_read() as a thin wrapper of
mark_stack_slot_obj_read(), just open code the helpers.
In addition, commit 6762e3a0bce5 ("bpf: simplify liveness to use
(callsite, depth) keyed func_instances") has made
mark_stack_slot_obj_read() always succeed, so return void and drop the
now unused bpf_reg_state argument.
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
kernel/bpf/verifier.c | 69 +++++++++++++------------------------------
1 file changed, 21 insertions(+), 48 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 11054ad89c14..6931012d3ee2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2972,50 +2972,13 @@ static int sort_subprogs_topo(struct bpf_verifier_env *env)
return ret;
}
-static int mark_stack_slot_obj_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- int spi, int nr_slots)
+static void mark_stack_slot_obj_read(struct bpf_verifier_env *env,
+ int spi, int nr_slots)
{
int i;
for (i = 0; i < nr_slots; i++)
mark_stack_slot_scratched(env, spi - i);
- return 0;
-}
-
-static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
-{
- int spi;
-
- /* For CONST_PTR_TO_DYNPTR, it must have already been done by
- * check_reg_arg in check_helper_call and mark_btf_func_reg_size in
- * check_kfunc_call.
- */
- if (reg->type == CONST_PTR_TO_DYNPTR)
- return 0;
- spi = dynptr_get_spi(env, reg);
- if (spi < 0)
- return spi;
- /* Caller ensures dynptr is valid and initialized, which means spi is in
- * bounds and spi is the first dynptr slot. Simply mark stack slot as
- * read.
- */
- return mark_stack_slot_obj_read(env, reg, spi, BPF_DYNPTR_NR_SLOTS);
-}
-
-static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- int spi, int nr_slots)
-{
- return mark_stack_slot_obj_read(env, reg, spi, nr_slots);
-}
-
-static int mark_irq_flag_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
-{
- int spi;
-
- spi = irq_flag_get_spi(env, reg);
- if (spi < 0)
- return spi;
- return mark_stack_slot_obj_read(env, reg, spi, 1);
}
/* This function is supposed to be used by the following 32-bit optimization
@@ -7078,7 +7041,7 @@ static int process_kptr_func(struct bpf_verifier_env *env, int regno,
static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_state *reg, argno_t argno, int insn_idx,
enum bpf_arg_type arg_type, int clone_ref_obj_id)
{
- int err;
+ int spi, err = 0;
if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) {
verbose(env,
@@ -7140,7 +7103,17 @@ static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_stat
return -EINVAL;
}
- err = mark_dynptr_read(env, reg);
+ if (reg->type != CONST_PTR_TO_DYNPTR) {
+ spi = dynptr_get_spi(env, reg);
+ if (spi < 0)
+ return spi;
+
+ /*
+ * For CONST_PTR_TO_DYNPTR, reg is already scratched by check_reg_arg
+ * in check_helper_call and mark_btf_func_reg_size in check_kfunc_call.
+ */
+ mark_stack_slot_obj_read(env, spi, BPF_DYNPTR_NR_SLOTS);
+ }
}
return err;
}
@@ -7250,9 +7223,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, struct bpf_reg_state *
if (spi < 0)
return spi;
- err = mark_iter_read(env, reg, spi, nr_slots);
- if (err)
- return err;
+ mark_stack_slot_obj_read(env, spi, nr_slots);
/* remember meta->iter info for process_iter_next_call() */
meta->iter.spi = spi;
@@ -11150,7 +11121,7 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
static int process_irq_flag(struct bpf_verifier_env *env, struct bpf_reg_state *reg, argno_t argno,
struct bpf_kfunc_call_arg_meta *meta)
{
- int err, kfunc_class = IRQ_NATIVE_KFUNC;
+ int err, spi, kfunc_class = IRQ_NATIVE_KFUNC;
bool irq_save;
if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_save] ||
@@ -11191,9 +11162,11 @@ static int process_irq_flag(struct bpf_verifier_env *env, struct bpf_reg_state *
return err;
}
- err = mark_irq_flag_read(env, reg);
- if (err)
- return err;
+ spi = irq_flag_get_spi(env, reg);
+ if (spi < 0)
+ return spi;
+
+ mark_stack_slot_obj_read(env, spi, 1);
err = unmark_stack_slot_irq_flag(env, reg, kfunc_class);
if (err)
--
2.52.0
^ permalink raw reply related [flat|nested] 22+ messages in thread* Re: [PATCH bpf-next v4 01/12] bpf: Simplify mark_stack_slot_obj_read() and callers
2026-05-06 14:26 ` [PATCH bpf-next v4 01/12] bpf: Simplify mark_stack_slot_obj_read() and callers Amery Hung
@ 2026-05-11 17:17 ` Eduard Zingerman
0 siblings, 0 replies; 22+ messages in thread
From: Eduard Zingerman @ 2026-05-11 17:17 UTC (permalink / raw)
To: Amery Hung, bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, memxor, martin.lau,
mykyta.yatsenko5, kernel-team
On Wed, 2026-05-06 at 07:26 -0700, Amery Hung wrote:
> Directly call mark_stack_slot_obj_read() from function processing
> iter, dynptr and irq_flag. To prepare for unifying dynptr handling,
> dynptr_get_spi() will be moved out of mark_dynptr_read(),
> As mark_dynptr_read() would join mark_iter_read() as a thin wrapper of
> mark_stack_slot_obj_read(), just open code the helpers.
>
> In addition, since 6762e3a0bce5 ("bpf: simplify liveness to use
> (callsite, depth) keyed func_instances") has made
> mark_stack_slot_obj_read() always succeed. Return void and drop the now
> unused bpf_reg_state argument.
>
> Signed-off-by: Amery Hung <ameryhung@gmail.com>
> ---
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
[...]
> +static void mark_stack_slot_obj_read(struct bpf_verifier_env *env,
> + int spi, int nr_slots)
Nit: this is probably a leftover from the stack liveness commit,
but I think that a better name for this function would now be
mark_stack_slots_scratched.
[...]
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH bpf-next v4 02/12] bpf: Unify dynptr handling in the verifier
2026-05-06 14:26 [PATCH bpf-next v4 00/12] Refactor verifier object relationship tracking Amery Hung
2026-05-06 14:26 ` [PATCH bpf-next v4 01/12] bpf: Simplify mark_stack_slot_obj_read() and callers Amery Hung
@ 2026-05-06 14:26 ` Amery Hung
2026-05-06 15:27 ` bot+bpf-ci
2026-05-06 14:26 ` [PATCH bpf-next v4 03/12] bpf: Assign reg->id when getting referenced kptr from ctx Amery Hung
` (9 subsequent siblings)
11 siblings, 1 reply; 22+ messages in thread
From: Amery Hung @ 2026-05-06 14:26 UTC (permalink / raw)
To: bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team
Simplify dynptr checking for helper and kfunc by unifying it. Remember
the initialized dynptr (i.e., !(arg_type & MEM_UNINIT)) passed to a
dynptr kfunc during process_dynptr_func() so that we can easily
retrieve the information for verification later. By saving it in
meta->dynptr, there is no need to call dynptr helpers such as
dynptr_id(), dynptr_ref_obj_id() and dynptr_get_type() in check_func_arg().
Remove and open code the helpers in process_dynptr_func() when
saving id, ref_obj_id, and type.
Besides, since dynptr ref_obj_id information is now passed around in
meta->dynptr (a struct bpf_dynptr_desc), drop the check in
helper_multiple_ref_obj_use().
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
include/linux/bpf_verifier.h | 13 ++-
kernel/bpf/verifier.c | 178 +++++++----------------------------
2 files changed, 40 insertions(+), 151 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 976e2b2f40e8..f330e9cf297e 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -1376,6 +1376,13 @@ struct bpf_map_desc {
int uid;
};
+/* The last initialized dynptr; Populated by process_dynptr_func() */
+struct bpf_dynptr_desc {
+ enum bpf_dynptr_type type;
+ u32 id;
+ u32 ref_obj_id;
+};
+
struct bpf_kfunc_call_arg_meta {
/* In parameters */
struct btf *btf;
@@ -1416,16 +1423,12 @@ struct bpf_kfunc_call_arg_meta {
struct {
struct btf_field *field;
} arg_rbtree_root;
- struct {
- enum bpf_dynptr_type type;
- u32 id;
- u32 ref_obj_id;
- } initialized_dynptr;
struct {
u8 spi;
u8 frameno;
} iter;
struct bpf_map_desc map;
+ struct bpf_dynptr_desc dynptr;
u64 mem_size;
};
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 6931012d3ee2..0bee6279c38e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -233,6 +233,7 @@ static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
struct bpf_call_arg_meta {
struct bpf_map_desc map;
+ struct bpf_dynptr_desc dynptr;
bool raw_mode;
bool pkt_access;
u8 release_regno;
@@ -241,7 +242,6 @@ struct bpf_call_arg_meta {
int mem_size;
u64 msize_max_value;
int ref_obj_id;
- int dynptr_id;
int func_id;
struct btf *btf;
u32 btf_id;
@@ -465,11 +465,6 @@ static bool is_ptr_cast_function(enum bpf_func_id func_id)
func_id == BPF_FUNC_skc_to_tcp_request_sock;
}
-static bool is_dynptr_ref_function(enum bpf_func_id func_id)
-{
- return func_id == BPF_FUNC_dynptr_data;
-}
-
static bool is_sync_callback_calling_kfunc(u32 btf_id);
static bool is_async_callback_calling_kfunc(u32 btf_id);
static bool is_callback_calling_kfunc(u32 btf_id);
@@ -538,8 +533,6 @@ static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
ref_obj_uses++;
if (is_acquire_function(func_id, map))
ref_obj_uses++;
- if (is_dynptr_ref_function(func_id))
- ref_obj_uses++;
return ref_obj_uses > 1;
}
@@ -7038,8 +7031,9 @@ static int process_kptr_func(struct bpf_verifier_env *env, int regno,
* use case. The second level is tracked using the upper bit of bpf_dynptr->size
* and checked dynamically during runtime.
*/
-static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_state *reg, argno_t argno, int insn_idx,
- enum bpf_arg_type arg_type, int clone_ref_obj_id)
+static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ argno_t argno, int insn_idx, enum bpf_arg_type arg_type,
+ int clone_ref_obj_id, struct bpf_dynptr_desc *dynptr)
{
int spi, err = 0;
@@ -7104,6 +7098,8 @@ static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_stat
}
if (reg->type != CONST_PTR_TO_DYNPTR) {
+ struct bpf_func_state *state = bpf_func(env, reg);
+
spi = dynptr_get_spi(env, reg);
if (spi < 0)
return spi;
@@ -7113,6 +7109,14 @@ static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_stat
* in check_helper_call and mark_btf_func_reg_size in check_kfunc_call.
*/
mark_stack_slot_obj_read(env, spi, BPF_DYNPTR_NR_SLOTS);
+
+ reg = &state->stack[spi].spilled_ptr;
+ }
+
+ if (dynptr) {
+ dynptr->type = reg->dynptr.type;
+ dynptr->id = reg->id;
+ dynptr->ref_obj_id = reg->ref_obj_id;
}
}
return err;
@@ -7882,72 +7886,6 @@ static int check_func_arg_reg_off(struct bpf_verifier_env *env,
}
}
-static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env,
- const struct bpf_func_proto *fn,
- struct bpf_reg_state *regs)
-{
- struct bpf_reg_state *state = NULL;
- int i;
-
- for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++)
- if (arg_type_is_dynptr(fn->arg_type[i])) {
- if (state) {
- verbose(env, "verifier internal error: multiple dynptr args\n");
- return NULL;
- }
- state = ®s[BPF_REG_1 + i];
- }
-
- if (!state)
- verbose(env, "verifier internal error: no dynptr arg found\n");
-
- return state;
-}
-
-static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
-{
- struct bpf_func_state *state = bpf_func(env, reg);
- int spi;
-
- if (reg->type == CONST_PTR_TO_DYNPTR)
- return reg->id;
- spi = dynptr_get_spi(env, reg);
- if (spi < 0)
- return spi;
- return state->stack[spi].spilled_ptr.id;
-}
-
-static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
-{
- struct bpf_func_state *state = bpf_func(env, reg);
- int spi;
-
- if (reg->type == CONST_PTR_TO_DYNPTR)
- return reg->ref_obj_id;
- spi = dynptr_get_spi(env, reg);
- if (spi < 0)
- return spi;
- return state->stack[spi].spilled_ptr.ref_obj_id;
-}
-
-static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg)
-{
- struct bpf_func_state *state = bpf_func(env, reg);
- int spi;
-
- if (reg->type == CONST_PTR_TO_DYNPTR)
- return reg->dynptr.type;
-
- spi = bpf_get_spi(reg->var_off.value);
- if (spi < 0) {
- verbose(env, "verifier internal error: invalid spi when querying dynptr type\n");
- return BPF_DYNPTR_TYPE_INVALID;
- }
-
- return state->stack[spi].spilled_ptr.dynptr.type;
-}
-
static int check_arg_const_str(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, argno_t argno)
{
@@ -8305,7 +8243,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
true, meta);
break;
case ARG_PTR_TO_DYNPTR:
- err = process_dynptr_func(env, reg, argno_from_reg(regno), insn_idx, arg_type, 0);
+ err = process_dynptr_func(env, reg, argno_from_reg(regno), insn_idx, arg_type, 0,
+ &meta->dynptr);
if (err)
return err;
break;
@@ -8968,7 +8907,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
if (ret)
return ret;
- ret = process_dynptr_func(env, reg, argno, -1, arg->arg_type, 0);
+ ret = process_dynptr_func(env, reg, argno, -1, arg->arg_type, 0, NULL);
if (ret)
return ret;
} else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
@@ -10048,52 +9987,10 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
}
}
break;
- case BPF_FUNC_dynptr_data:
- {
- struct bpf_reg_state *reg;
- int id, ref_obj_id;
-
- reg = get_dynptr_arg_reg(env, fn, regs);
- if (!reg)
- return -EFAULT;
-
-
- if (meta.dynptr_id) {
- verifier_bug(env, "meta.dynptr_id already set");
- return -EFAULT;
- }
- if (meta.ref_obj_id) {
- verifier_bug(env, "meta.ref_obj_id already set");
- return -EFAULT;
- }
-
- id = dynptr_id(env, reg);
- if (id < 0) {
- verifier_bug(env, "failed to obtain dynptr id");
- return id;
- }
-
- ref_obj_id = dynptr_ref_obj_id(env, reg);
- if (ref_obj_id < 0) {
- verifier_bug(env, "failed to obtain dynptr ref_obj_id");
- return ref_obj_id;
- }
-
- meta.dynptr_id = id;
- meta.ref_obj_id = ref_obj_id;
-
- break;
- }
case BPF_FUNC_dynptr_write:
{
- enum bpf_dynptr_type dynptr_type;
- struct bpf_reg_state *reg;
+ enum bpf_dynptr_type dynptr_type = meta.dynptr.type;
- reg = get_dynptr_arg_reg(env, fn, regs);
- if (!reg)
- return -EFAULT;
-
- dynptr_type = dynptr_get_type(env, reg);
if (dynptr_type == BPF_DYNPTR_TYPE_INVALID)
return -EFAULT;
@@ -10284,10 +10181,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EFAULT;
}
- if (is_dynptr_ref_function(func_id))
- regs[BPF_REG_0].dynptr_id = meta.dynptr_id;
-
- if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
+ if (is_ptr_cast_function(func_id)) {
/* For release_reference() */
regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
} else if (is_acquire_function(func_id, meta.map.ptr)) {
@@ -10301,6 +10195,11 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
regs[BPF_REG_0].ref_obj_id = id;
}
+ if (func_id == BPF_FUNC_dynptr_data) {
+ regs[BPF_REG_0].dynptr_id = meta.dynptr.id;
+ regs[BPF_REG_0].ref_obj_id = meta.dynptr.ref_obj_id;
+ }
+
err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta);
if (err)
return err;
@@ -11896,7 +11795,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
meta->release_regno = regno;
} else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] &&
(dynptr_arg_type & MEM_UNINIT)) {
- enum bpf_dynptr_type parent_type = meta->initialized_dynptr.type;
+ enum bpf_dynptr_type parent_type = meta->dynptr.type;
if (parent_type == BPF_DYNPTR_TYPE_INVALID) {
verifier_bug(env, "no dynptr type for parent of clone");
@@ -11904,30 +11803,17 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
dynptr_arg_type |= (unsigned int)get_dynptr_type_flag(parent_type);
- clone_ref_obj_id = meta->initialized_dynptr.ref_obj_id;
+ clone_ref_obj_id = meta->dynptr.ref_obj_id;
if (dynptr_type_refcounted(parent_type) && !clone_ref_obj_id) {
verifier_bug(env, "missing ref obj id for parent of clone");
return -EFAULT;
}
}
- ret = process_dynptr_func(env, reg, argno, insn_idx,
- dynptr_arg_type, clone_ref_obj_id);
+ ret = process_dynptr_func(env, reg, argno, insn_idx, dynptr_arg_type,
+ clone_ref_obj_id, &meta->dynptr);
if (ret < 0)
return ret;
-
- if (!(dynptr_arg_type & MEM_UNINIT)) {
- int id = dynptr_id(env, reg);
-
- if (id < 0) {
- verifier_bug(env, "failed to obtain dynptr id");
- return id;
- }
- meta->initialized_dynptr.id = id;
- meta->initialized_dynptr.type = dynptr_get_type(env, reg);
- meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg);
- }
-
break;
}
case KF_ARG_PTR_TO_ITER:
@@ -12550,7 +12436,7 @@ static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_ca
}
} else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_slice] ||
meta->func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) {
- enum bpf_type_flag type_flag = get_dynptr_type_flag(meta->initialized_dynptr.type);
+ enum bpf_type_flag type_flag = get_dynptr_type_flag(meta->dynptr.type);
mark_reg_known_zero(env, regs, BPF_REG_0);
@@ -12574,11 +12460,11 @@ static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_ca
}
}
- if (!meta->initialized_dynptr.id) {
+ if (!meta->dynptr.id) {
verifier_bug(env, "no dynptr id");
return -EFAULT;
}
- regs[BPF_REG_0].dynptr_id = meta->initialized_dynptr.id;
+ regs[BPF_REG_0].dynptr_id = meta->dynptr.id;
/* we don't need to set BPF_REG_0's ref obj id
* because packet slices are not refcounted (see
@@ -12766,7 +12652,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (meta.release_regno) {
struct bpf_reg_state *reg = ®s[meta.release_regno];
- if (meta.initialized_dynptr.ref_obj_id) {
+ if (meta.dynptr.ref_obj_id) {
err = unmark_stack_slots_dynptr(env, reg);
} else {
err = release_reference(env, reg->ref_obj_id);
--
2.52.0
^ permalink raw reply related [flat|nested] 22+ messages in thread* Re: [PATCH bpf-next v4 02/12] bpf: Unify dynptr handling in the verifier
2026-05-06 14:26 ` [PATCH bpf-next v4 02/12] bpf: Unify dynptr handling in the verifier Amery Hung
@ 2026-05-06 15:27 ` bot+bpf-ci
2026-05-07 12:22 ` Amery Hung
0 siblings, 1 reply; 22+ messages in thread
From: bot+bpf-ci @ 2026-05-06 15:27 UTC (permalink / raw)
To: ameryhung, bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team, ast, andrii,
daniel, martin.lau, eddyz87, yonghong.song, clm, ihor.solodrai
[-- Attachment #1: Type: text/plain, Size: 1091 bytes --]
> diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
> index 976e2b2f40e8..f330e9cf297e 100644
> --- a/include/linux/bpf_verifier.h
> +++ b/include/linux/bpf_verifier.h
> @@ -1376,6 +1376,13 @@ struct bpf_map_desc {
> int uid;
> };
>
> +/* The last initialized dynptr; Populated by process_dynptr_func() */
> +struct bpf_dynptr_desc {
> + enum bpf_dynptr_type type;
> + u32 id;
> + u32 ref_obj_id;
> +};
> +
A review comment from Mykyta Yatsenko requested that a comment be added
to the ref_obj_id field explaining what it's for:
https://lore.kernel.org/bpf/87ikb2tkta.fsf@gmail.com/
The author mentioned in v3 that comments for id, ref_obj_id, and parent_id
would be added in a later refactor patch. However, should this struct have
inline documentation now for clarity, particularly for the ref_obj_id field?
[ ... ]
---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/25442456702
^ permalink raw reply [flat|nested] 22+ messages in thread* Re: [PATCH bpf-next v4 02/12] bpf: Unify dynptr handling in the verifier
2026-05-06 15:27 ` bot+bpf-ci
@ 2026-05-07 12:22 ` Amery Hung
0 siblings, 0 replies; 22+ messages in thread
From: Amery Hung @ 2026-05-07 12:22 UTC (permalink / raw)
To: bot+bpf-ci
Cc: bpf, netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, kernel-team, ast, yonghong.song,
clm, ihor.solodrai
On Wed, May 6, 2026 at 4:27 PM <bot+bpf-ci@kernel.org> wrote:
>
> > diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
> > index 976e2b2f40e8..f330e9cf297e 100644
> > --- a/include/linux/bpf_verifier.h
> > +++ b/include/linux/bpf_verifier.h
> > @@ -1376,6 +1376,13 @@ struct bpf_map_desc {
> > int uid;
> > };
> >
> > +/* The last initialized dynptr; Populated by process_dynptr_func() */
> > +struct bpf_dynptr_desc {
> > + enum bpf_dynptr_type type;
> > + u32 id;
> > + u32 ref_obj_id;
> > +};
> > +
>
> A review comment from Mykyta Yatsenko requested that a comment be added
> to the ref_obj_id field explaining what it's for:
>
> https://lore.kernel.org/bpf/87ikb2tkta.fsf@gmail.com/
>
> The author mentioned in v3 that comments for id, ref_obj_id, and parent_id
> would be added in a later refactor patch. However, should this struct have
> inline documentation now for clarity, particularly for the ref_obj_id field?
For the meaning of id, ref_obj_id and parent_id, keep the
documentation in where they are defined: bpf_verifier.h
>
> [ ... ]
>
>
> ---
> AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
> See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
>
> CI run summary: https://github.com/kernel-patches/bpf/actions/runs/25442456702
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH bpf-next v4 03/12] bpf: Assign reg->id when getting referenced kptr from ctx
2026-05-06 14:26 [PATCH bpf-next v4 00/12] Refactor verifier object relationship tracking Amery Hung
2026-05-06 14:26 ` [PATCH bpf-next v4 01/12] bpf: Simplify mark_stack_slot_obj_read() and callers Amery Hung
2026-05-06 14:26 ` [PATCH bpf-next v4 02/12] bpf: Unify dynptr handling in the verifier Amery Hung
@ 2026-05-06 14:26 ` Amery Hung
2026-05-06 15:27 ` bot+bpf-ci
2026-05-11 21:31 ` Eduard Zingerman
2026-05-06 14:27 ` [PATCH bpf-next v4 04/12] bpf: Preserve reg->id of pointer objects after null-check Amery Hung
` (8 subsequent siblings)
11 siblings, 2 replies; 22+ messages in thread
From: Amery Hung @ 2026-05-06 14:26 UTC (permalink / raw)
To: bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team
Assign reg->id when getting a referenced kptr by reading the program context
to be consistent with R0 of KF_ACQUIRE kfunc. skb dynptr will track the
referenced skb in qdisc programs using a new field reg->parent_id in
a later patch.
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
kernel/bpf/verifier.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 0bee6279c38e..0891a3ac4d51 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6057,8 +6057,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
} else {
mark_reg_known_zero(env, regs,
value_regno);
- if (type_may_be_null(info.reg_type))
- regs[value_regno].id = ++env->id_gen;
/* A load of ctx field could have different
* actual load size with the one encoded in the
* insn. When the dst is PTR, it is for sure not
@@ -6068,8 +6066,11 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
if (base_type(info.reg_type) == PTR_TO_BTF_ID) {
regs[value_regno].btf = info.btf;
regs[value_regno].btf_id = info.btf_id;
+ regs[value_regno].id = info.ref_obj_id;
regs[value_regno].ref_obj_id = info.ref_obj_id;
}
+ if (type_may_be_null(info.reg_type) && !regs[value_regno].id)
+ regs[value_regno].id = ++env->id_gen;
}
regs[value_regno].type = info.reg_type;
}
--
2.52.0
^ permalink raw reply related [flat|nested] 22+ messages in thread* Re: [PATCH bpf-next v4 03/12] bpf: Assign reg->id when getting referenced kptr from ctx
2026-05-06 14:26 ` [PATCH bpf-next v4 03/12] bpf: Assign reg->id when getting referenced kptr from ctx Amery Hung
@ 2026-05-06 15:27 ` bot+bpf-ci
2026-05-07 12:38 ` Amery Hung
2026-05-11 21:31 ` Eduard Zingerman
1 sibling, 1 reply; 22+ messages in thread
From: bot+bpf-ci @ 2026-05-06 15:27 UTC (permalink / raw)
To: ameryhung, bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team, ast, andrii,
daniel, martin.lau, eddyz87, yonghong.song, clm, ihor.solodrai
[-- Attachment #1: Type: text/plain, Size: 3173 bytes --]
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -6057,8 +6057,6 @@ struct bpf_insn_access_aux info = {
> } else {
> mark_reg_known_zero(env, regs,
> value_regno);
> - if (type_may_be_null(info.reg_type))
> - regs[value_regno].id = ++env->id_gen;
> /* A load of ctx field could have different
> * actual load size with the one encoded in the
> * insn. When the dst is PTR, it is for sure not
> @@ -6068,8 +6066,11 @@ struct bpf_insn_access_aux info = {
> if (base_type(info.reg_type) == PTR_TO_BTF_ID) {
> regs[value_regno].btf = info.btf;
> regs[value_regno].btf_id = info.btf_id;
> + regs[value_regno].id = info.ref_obj_id;
^^^^
Does this introduce a bug when reading the same nullable context field
multiple times?
The new code sets reg->id = info.ref_obj_id, where ref_obj_id comes from
ctx_arg_info->ref_obj_id. The ctx_arg_info structure is initialized once
during program type setup in check_attach_btf_id(), making ref_obj_id a
static value per context field, not per read.
When a BPF program reads the same nullable context field into different
registers, they would share the same ID:
r1 = ctx->nullable_kptr; // r1->id = ref_obj_id = 5
r2 = ctx->nullable_kptr; // r2->id = ref_obj_id = 5 (SAME!)
if (r1 != NULL) {
// mark_ptr_or_null_regs(id=5) marks ALL registers with id==5
// Both r1 AND r2 are now considered safe
use(r2); // BUG: r2 was never checked but verifier allows use
}
The mark_ptr_or_null_reg() function uses reg->id to identify which
registers should be updated together after a NULL check. With shared IDs,
independent reads are incorrectly treated as aliases.
The old code generated a unique ID via ++env->id_gen for each nullable
read, ensuring proper NULL-check tracking.
> regs[value_regno].ref_obj_id = info.ref_obj_id;
> }
> + if (type_may_be_null(info.reg_type) && !regs[value_regno].id)
> + regs[value_regno].id = ++env->id_gen;
Would this fallback path prevent fresh ID generation when id was already
set to ref_obj_id? The condition '!regs[value_regno].id' is now false for
nullable PTR_TO_BTF_ID pointers, so each read gets the same static ID
instead of a unique one.
Eduard Zingerman's review comment from v3 suggested that the commit message
should mention that this patch is mandatory for proper invalidation
propagation in later patches. He noted that without the line
'regs[value_regno].id = info.ref_obj_id;', the test
'bpf_qdisc_fail__invalid_dynptr_cross_frame.c:invalid_dynptr_cross_frame'
fails because the .id link is needed to propagate invalidation triggered by
'free_skb(skb);' to 'hdr'. The current commit message mentions consistency
and parent_id tracking but does not explicitly mention the invalidation
propagation aspect.
---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/25442456702
^ permalink raw reply [flat|nested] 22+ messages in thread* Re: [PATCH bpf-next v4 03/12] bpf: Assign reg->id when getting referenced kptr from ctx
2026-05-06 15:27 ` bot+bpf-ci
@ 2026-05-07 12:38 ` Amery Hung
0 siblings, 0 replies; 22+ messages in thread
From: Amery Hung @ 2026-05-07 12:38 UTC (permalink / raw)
To: bot+bpf-ci
Cc: bpf, netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, kernel-team, ast, yonghong.song,
clm, ihor.solodrai
On Wed, May 6, 2026 at 4:27 PM <bot+bpf-ci@kernel.org> wrote:
>
> > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > --- a/kernel/bpf/verifier.c
> > +++ b/kernel/bpf/verifier.c
> > @@ -6057,8 +6057,6 @@ struct bpf_insn_access_aux info = {
> > } else {
> > mark_reg_known_zero(env, regs,
> > value_regno);
> > - if (type_may_be_null(info.reg_type))
> > - regs[value_regno].id = ++env->id_gen;
> > /* A load of ctx field could have different
> > * actual load size with the one encoded in the
> > * insn. When the dst is PTR, it is for sure not
> > @@ -6068,8 +6066,11 @@ struct bpf_insn_access_aux info = {
> > if (base_type(info.reg_type) == PTR_TO_BTF_ID) {
> > regs[value_regno].btf = info.btf;
> > regs[value_regno].btf_id = info.btf_id;
> > + regs[value_regno].id = info.ref_obj_id;
> ^^^^
>
> Does this introduce a bug when reading the same nullable context field
> multiple times?
>
> The new code sets reg->id = info.ref_obj_id, where ref_obj_id comes from
> ctx_arg_info->ref_obj_id. The ctx_arg_info structure is initialized once
> during program type setup in check_attach_btf_id(), making ref_obj_id a
> static value per context field, not per read.
>
> When a BPF program reads the same nullable context field into different
> registers, they would share the same ID:
>
> r1 = ctx->nullable_kptr; // r1->id = ref_obj_id = 5
> r2 = ctx->nullable_kptr; // r2->id = ref_obj_id = 5 (SAME!)
> if (r1 != NULL) {
> // mark_ptr_or_null_regs(id=5) marks ALL registers with id==5
> // Both r1 AND r2 are now considered safe
> use(r2); // BUG: r2 was never checked but verifier allows use
> }
>
> The mark_ptr_or_null_reg() function uses reg->id to identify which
> registers should be updated together after a NULL check. With shared IDs,
> independent reads are incorrectly treated as aliases.
I don't think this is a bug. They are indeed aliases. The referenced
kptr acquired from the struct_ops argument, if valid, should always have
the same ref_obj_id (also id) because they point to the same kernel
object.
>
> The old code generated a unique ID via ++env->id_gen for each nullable
> read, ensuring proper NULL-check tracking.
>
> > regs[value_regno].ref_obj_id = info.ref_obj_id;
> > }
> > + if (type_may_be_null(info.reg_type) && !regs[value_regno].id)
> > + regs[value_regno].id = ++env->id_gen;
>
> Would this fallback path prevent fresh ID generation when id was already
> set to ref_obj_id? The condition '!regs[value_regno].id' is now false for
> nullable PTR_TO_BTF_ID pointers, so each read gets the same static ID
> instead of a unique one.
Yes. It is intentional. This mirrors a referenced object obtained through
a KF_ACQUIRE kfunc, where id == ref_obj_id.
>
> Eduard Zingerman's review comment from v3 suggested that the commit message
> should mention that this patch is mandatory for proper invalidation
> propagation in later patches. He noted that without the line
> 'regs[value_regno].id = info.ref_obj_id;', the test
> 'bpf_qdisc_fail__invalid_dynptr_cross_frame.c:invalid_dynptr_cross_frame'
> fails because the .id link is needed to propagate invalidation triggered by
> 'free_skb(skb);' to 'hdr'. The current commit message mentions consistency
> and parent_id tracking but does not explicitly mention the invalidation
> propagation aspect.
>
>
> ---
> AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
> See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
>
> CI run summary: https://github.com/kernel-patches/bpf/actions/runs/25442456702
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH bpf-next v4 03/12] bpf: Assign reg->id when getting referenced kptr from ctx
2026-05-06 14:26 ` [PATCH bpf-next v4 03/12] bpf: Assign reg->id when getting referenced kptr from ctx Amery Hung
2026-05-06 15:27 ` bot+bpf-ci
@ 2026-05-11 21:31 ` Eduard Zingerman
1 sibling, 0 replies; 22+ messages in thread
From: Eduard Zingerman @ 2026-05-11 21:31 UTC (permalink / raw)
To: Amery Hung, bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, memxor, martin.lau,
mykyta.yatsenko5, kernel-team
On Wed, 2026-05-06 at 07:26 -0700, Amery Hung wrote:
> Assign reg->id when getting referenced kptr from read program context
> to be consistent with R0 of KF_ACQUIRE kfunc. skb dynptr will track the
> referenced skb in qdisc programs using a new field reg->parent_id in
> a later patch.
>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>
> Signed-off-by: Amery Hung <ameryhung@gmail.com>
> ---
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH bpf-next v4 04/12] bpf: Preserve reg->id of pointer objects after null-check
2026-05-06 14:26 [PATCH bpf-next v4 00/12] Refactor verifier object relationship tracking Amery Hung
` (2 preceding siblings ...)
2026-05-06 14:26 ` [PATCH bpf-next v4 03/12] bpf: Assign reg->id when getting referenced kptr from ctx Amery Hung
@ 2026-05-06 14:27 ` Amery Hung
2026-05-11 21:48 ` Eduard Zingerman
2026-05-06 14:27 ` [PATCH bpf-next v4 05/12] bpf: Refactor object relationship tracking and fix dynptr UAF bug Amery Hung
` (7 subsequent siblings)
11 siblings, 1 reply; 22+ messages in thread
From: Amery Hung @ 2026-05-06 14:27 UTC (permalink / raw)
To: bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team
Preserve reg->id of pointer objects after null-checking the register so
that children objects derived from it can still refer to it in the new
object relationship tracking mechanism introduced in a later patch. This
change incurs a slight increase in the number of states in one selftest
bpf object, rbtree_search.bpf.o. For Meta bpf objects, the increase of
states is also negligible.
Selftest BPF objects with insns_diff > 0
Program Insns (A) Insns (B) Insns (DIFF) States (A) States (B) States (DIFF)
------------------------ --------- --------- -------------- ---------- ---------- -------------
rbtree_search 6820 7326 +506 (+7.42%) 379 398 +19 (+5.01%)
Meta BPF objects with insns_diff > 0
Program Insns (A) Insns (B) Insns (DIFF) States (A) States (B) States (DIFF)
------------------------ --------- --------- -------------- ---------- ---------- -------------
ned_imex_be_tclass 52 57 +5 (+9.62%) 5 6 +1 (+20.00%)
ned_imex_be_tclass 52 57 +5 (+9.62%) 5 6 +1 (+20.00%)
ned_skop_auto_flowlabel 523 526 +3 (+0.57%) 39 40 +1 (+2.56%)
ned_skop_mss 289 292 +3 (+1.04%) 20 20 +0 (+0.00%)
ned_skopt_bet_classifier 78 82 +4 (+5.13%) 8 8 +0 (+0.00%)
dctcp_update_alpha 252 320 +68 (+26.98%) 21 27 +6 (+28.57%)
dctcp_update_alpha 252 320 +68 (+26.98%) 21 27 +6 (+28.57%)
ned_ts_func 119 126 +7 (+5.88%) 6 7 +1 (+16.67%)
tw_egress 1119 1128 +9 (+0.80%) 95 96 +1 (+1.05%)
tw_ingress 1128 1137 +9 (+0.80%) 95 96 +1 (+1.05%)
tw_tproxy_router 4380 4465 +85 (+1.94%) 114 118 +4 (+3.51%)
tw_tproxy_router4 3093 3170 +77 (+2.49%) 83 88 +5 (+6.02%)
ttls_tc_ingress 34656 35717 +1061 (+3.06%) 936 970 +34 (+3.63%)
tw_twfw_egress 222327 222338 +11 (+0.00%) 10563 10564 +1 (+0.01%)
tw_twfw_ingress 78295 78299 +4 (+0.01%) 3825 3826 +1 (+0.03%)
tw_twfw_tc_eg 222839 222859 +20 (+0.01%) 10584 10585 +1 (+0.01%)
tw_twfw_tc_in 78295 78299 +4 (+0.01%) 3825 3826 +1 (+0.03%)
tw_twfw_egress 8080 8085 +5 (+0.06%) 456 456 +0 (+0.00%)
tw_twfw_ingress 8053 8056 +3 (+0.04%) 454 454 +0 (+0.00%)
tw_twfw_tc_eg 8154 8174 +20 (+0.25%) 456 457 +1 (+0.22%)
tw_twfw_tc_in 8060 8063 +3 (+0.04%) 455 455 +0 (+0.00%)
tw_twfw_egress 222327 222338 +11 (+0.00%) 10563 10564 +1 (+0.01%)
tw_twfw_ingress 78295 78299 +4 (+0.01%) 3825 3826 +1 (+0.03%)
tw_twfw_tc_eg 222839 222859 +20 (+0.01%) 10584 10585 +1 (+0.01%)
tw_twfw_tc_in 78295 78299 +4 (+0.01%) 3825 3826 +1 (+0.03%)
tw_twfw_egress 8080 8085 +5 (+0.06%) 456 456 +0 (+0.00%)
tw_twfw_ingress 8053 8056 +3 (+0.04%) 454 454 +0 (+0.00%)
tw_twfw_tc_eg 8154 8174 +20 (+0.25%) 456 457 +1 (+0.22%)
tw_twfw_tc_in 8060 8063 +3 (+0.04%) 455 455 +0 (+0.00%)
Looking into rbtree_search, the reason for such increase is that the
verifier has to explore the main loop shown below for one more iteration
until state pruning decides the current state is safe.
long rbtree_search(void *ctx)
{
...
bpf_spin_lock(&glock0);
rb_n = bpf_rbtree_root(&groot0);
while (can_loop) {
if (!rb_n) {
bpf_spin_unlock(&glock0);
return __LINE__;
}
n = rb_entry(rb_n, struct node_data, r0);
if (lookup_key == n->key0)
break;
if (nr_gc < NR_NODES)
gc_ns[nr_gc++] = rb_n;
if (lookup_key < n->key0)
rb_n = bpf_rbtree_left(&groot0, rb_n);
else
rb_n = bpf_rbtree_right(&groot0, rb_n);
}
...
}
Below is what the verifier sees at the start of each iteration
(65: may_goto) after preserving id of rb_n. Without id of rb_n, the
verifier stops exploring the loop at iter 16.
rb_n gc_ns[15]
iter 15 257 257
iter 16 290 257 rb_n: idmap add 257->290
gc_ns[15]: check 257 != 290 --> state not equal
iter 17 325 257 rb_n: idmap add 290->325
gc_ns[15]: idmap add 257->257 --> state safe
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
kernel/bpf/verifier.c | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 0891a3ac4d51..e1e95a066019 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -15268,15 +15268,10 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
mark_ptr_not_null_reg(reg);
- if (!reg_may_point_to_spin_lock(reg)) {
- /* For not-NULL ptr, reg->ref_obj_id will be reset
- * in release_reference().
- *
- * reg->id is still used by spin_lock ptr. Other
- * than spin_lock ptr type, reg->id can be reset.
- */
- reg->id = 0;
- }
+ /*
+ * reg->id is preserved for object relationship tracking
+ * and spin_lock lock state tracking
+ */
}
}
--
2.52.0
^ permalink raw reply related [flat|nested] 22+ messages in thread
* Re: [PATCH bpf-next v4 04/12] bpf: Preserve reg->id of pointer objects after null-check
2026-05-06 14:27 ` [PATCH bpf-next v4 04/12] bpf: Preserve reg->id of pointer objects after null-check Amery Hung
@ 2026-05-11 21:48 ` Eduard Zingerman
0 siblings, 0 replies; 22+ messages in thread
From: Eduard Zingerman @ 2026-05-11 21:48 UTC (permalink / raw)
To: Amery Hung, bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, memxor, martin.lau,
mykyta.yatsenko5, kernel-team
On Wed, 2026-05-06 at 07:27 -0700, Amery Hung wrote:
> Preserve reg->id of pointer objects after null-checking the register so
> that children objects derived from it can still refer to it in the new
> object relationship tracking mechanism introduced in a later patch. This
> change incurs a slight increase in the number of states in one selftest
> bpf object, rbtree_search.bpf.o. For Meta bpf objects, the increase of
> states is also negligible.
>
> Selftest BPF objects with insns_diff > 0
>
> Program Insns (A) Insns (B) Insns (DIFF) States (A) States (B) States (DIFF)
> ------------------------ --------- --------- -------------- ---------- ---------- -------------
> rbtree_search 6820 7326 +506 (+7.42%) 379 398 +19 (+5.01%)
>
> Meta BPF objects with insns_diff > 0
>
> Program Insns (A) Insns (B) Insns (DIFF) States (A) States (B) States (DIFF)
> ------------------------ --------- --------- -------------- ---------- ---------- -------------
> ned_imex_be_tclass 52 57 +5 (+9.62%) 5 6 +1 (+20.00%)
> ned_imex_be_tclass 52 57 +5 (+9.62%) 5 6 +1 (+20.00%)
> ned_skop_auto_flowlabel 523 526 +3 (+0.57%) 39 40 +1 (+2.56%)
> ned_skop_mss 289 292 +3 (+1.04%) 20 20 +0 (+0.00%)
> ned_skopt_bet_classifier 78 82 +4 (+5.13%) 8 8 +0 (+0.00%)
> dctcp_update_alpha 252 320 +68 (+26.98%) 21 27 +6 (+28.57%)
> dctcp_update_alpha 252 320 +68 (+26.98%) 21 27 +6 (+28.57%)
> ned_ts_func 119 126 +7 (+5.88%) 6 7 +1 (+16.67%)
> tw_egress 1119 1128 +9 (+0.80%) 95 96 +1 (+1.05%)
> tw_ingress 1128 1137 +9 (+0.80%) 95 96 +1 (+1.05%)
> tw_tproxy_router 4380 4465 +85 (+1.94%) 114 118 +4 (+3.51%)
> tw_tproxy_router4 3093 3170 +77 (+2.49%) 83 88 +5 (+6.02%)
> ttls_tc_ingress 34656 35717 +1061 (+3.06%) 936 970 +34 (+3.63%)
> tw_twfw_egress 222327 222338 +11 (+0.00%) 10563 10564 +1 (+0.01%)
> tw_twfw_ingress 78295 78299 +4 (+0.01%) 3825 3826 +1 (+0.03%)
> tw_twfw_tc_eg 222839 222859 +20 (+0.01%) 10584 10585 +1 (+0.01%)
> tw_twfw_tc_in 78295 78299 +4 (+0.01%) 3825 3826 +1 (+0.03%)
> tw_twfw_egress 8080 8085 +5 (+0.06%) 456 456 +0 (+0.00%)
> tw_twfw_ingress 8053 8056 +3 (+0.04%) 454 454 +0 (+0.00%)
> tw_twfw_tc_eg 8154 8174 +20 (+0.25%) 456 457 +1 (+0.22%)
> tw_twfw_tc_in 8060 8063 +3 (+0.04%) 455 455 +0 (+0.00%)
> tw_twfw_egress 222327 222338 +11 (+0.00%) 10563 10564 +1 (+0.01%)
> tw_twfw_ingress 78295 78299 +4 (+0.01%) 3825 3826 +1 (+0.03%)
> tw_twfw_tc_eg 222839 222859 +20 (+0.01%) 10584 10585 +1 (+0.01%)
> tw_twfw_tc_in 78295 78299 +4 (+0.01%) 3825 3826 +1 (+0.03%)
> tw_twfw_egress 8080 8085 +5 (+0.06%) 456 456 +0 (+0.00%)
> tw_twfw_ingress 8053 8056 +3 (+0.04%) 454 454 +0 (+0.00%)
> tw_twfw_tc_eg 8154 8174 +20 (+0.25%) 456 457 +1 (+0.22%)
> tw_twfw_tc_in 8060 8063 +3 (+0.04%) 455 455 +0 (+0.00%)
>
> Looking into rbtree_search, the reason for such increase is that the
> verifier has to explore the main loop shown below for one more iteration
> until state pruning decides the current state is safe.
>
> long rbtree_search(void *ctx)
> {
> ...
> bpf_spin_lock(&glock0);
> rb_n = bpf_rbtree_root(&groot0);
> while (can_loop) {
> if (!rb_n) {
> bpf_spin_unlock(&glock0);
> return __LINE__;
> }
>
> n = rb_entry(rb_n, struct node_data, r0);
> if (lookup_key == n->key0)
> break;
> if (nr_gc < NR_NODES)
> gc_ns[nr_gc++] = rb_n;
> if (lookup_key < n->key0)
> rb_n = bpf_rbtree_left(&groot0, rb_n);
> else
> rb_n = bpf_rbtree_right(&groot0, rb_n);
> }
> ...
> }
>
> Below is what the verifier sees at the start of each iteration
> (65: may_goto) after preserving id of rb_n. Without id of rb_n, the
> verifier stops exploring the loop at iter 16.
>
> rb_n gc_ns[15]
> iter 15 257 257
>
> iter 16 290 257 rb_n: idmap add 257->290
> gc_ns[15]: check 257 != 290 --> state not equal
>
> iter 17 325 257 rb_n: idmap add 290->325
> gc_ns[15]: idmap add 257->257 --> state safe
>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>
> Signed-off-by: Amery Hung <ameryhung@gmail.com>
> ---
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
[...]
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH bpf-next v4 05/12] bpf: Refactor object relationship tracking and fix dynptr UAF bug
2026-05-06 14:26 [PATCH bpf-next v4 00/12] Refactor verifier object relationship tracking Amery Hung
` (3 preceding siblings ...)
2026-05-06 14:27 ` [PATCH bpf-next v4 04/12] bpf: Preserve reg->id of pointer objects after null-check Amery Hung
@ 2026-05-06 14:27 ` Amery Hung
2026-05-06 15:27 ` bot+bpf-ci
2026-05-06 14:27 ` [PATCH bpf-next v4 06/12] bpf: Remove redundant dynptr arg check for helper Amery Hung
` (6 subsequent siblings)
11 siblings, 1 reply; 22+ messages in thread
From: Amery Hung @ 2026-05-06 14:27 UTC (permalink / raw)
To: bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team
Refactor object relationship tracking in the verifier and fix a dynptr
use-after-free bug where file/skb dynptrs are not invalidated when the
parent referenced object is freed.
Add parent_id to bpf_reg_state to precisely track child-parent
relationships. A child object's parent_id points to the parent object's
id. This replaces the PTR_TO_MEM-specific dynptr_id and does not
increase the size of bpf_reg_state on 64-bit machines as there is
existing padding.
When calling dynptr constructors (i.e., process_dynptr_func() with
MEM_UNINIT argument), track the parent's id if the parent is a
referenced object. This only applies to file dynptr and skb dynptr,
so only pass parent reg->id to kfunc constructors.
For release_reference(), invalidating an object now also invalidates
all descendants by traversing the object tree. This is done using
stack-based DFS to avoid recursive call chains of release_reference() ->
unmark_stack_slots_dynptr() -> release_reference(). Referenced objects
encountered during tree traversal cannot be indirectly released. They
require an explicit helper/kfunc call to release the acquired resources.
While the new design changes how object relationships are tracked in
the verifier, it does not change the verifier's behavior. Here is the
implication for dynptr, pointer casting, and owning/non-owning
references:
Dynptr:
When initializing a dynptr, referenced dynptrs acquire a reference for
ref_obj_id. If the dynptr has a referenced parent, parent_id tracks the
parent's id. When cloning, ref_obj_id and parent_id are copied from the
original. Releasing a referenced dynptr via release_reference(ref_obj_id)
invalidates all clones and derived slices. For non-referenced dynptrs,
only the specific dynptr and its children are invalidated.
Pointer casting:
Referenced socket pointers and their casted counterparts share the same
lifetime but have different nullness — they have different id but the
same ref_obj_id.
Owning to non-owning reference conversion:
After converting owning to non-owning by clearing ref_obj_id (e.g.,
object(id=1, ref_obj_id=1) -> object(id=1, ref_obj_id=0)), the
verifier only needs to release the reference state, so it calls
release_reference_nomark() instead of release_reference().
Note that the error message "reference has not been acquired before" in
the helper and kfunc release paths is removed. This message was already
unreachable. The verifier only calls release_reference() after
confirming meta.ref_obj_id is valid, so the condition could never
trigger in practice (no selftest exercises it either). With the
refactor, release_reference() can now be called with non-acquired ids
and have different error conditions. Report directly in
release_reference() instead.
Fixes: 870c28588afa ("bpf: net_sched: Add basic bpf qdisc kfuncs")
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
include/linux/bpf_verifier.h | 23 +-
kernel/bpf/log.c | 8 +-
kernel/bpf/states.c | 10 +-
kernel/bpf/verifier.c | 332 +++++++++---------
.../testing/selftests/bpf/progs/dynptr_fail.c | 18 +-
5 files changed, 203 insertions(+), 188 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index f330e9cf297e..51d5f5dd6e5b 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -66,7 +66,6 @@ struct bpf_reg_state {
struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
u32 mem_size;
- u32 dynptr_id; /* for dynptr slices */
};
/* For dynptr stack slots */
@@ -188,6 +187,13 @@ struct bpf_reg_state {
* allowed and has the same effect as bpf_sk_release(sk).
*/
u32 ref_obj_id;
+ /* Tracks the parent object this register was derived from.
+ * Used for cascading invalidation: when the parent object is
+ * released or invalidated, all registers with matching parent_id
+ * are also invalidated. For example, a slice from bpf_dynptr_data()
+ * gets parent_id set to the dynptr's id.
+ */
+ u32 parent_id;
/* Inside the callee two registers can be both PTR_TO_STACK like
* R1=fp-8 and R2=fp-8, but one of them points to this function stack
* while another to the caller's stack. To differentiate them 'frameno'
@@ -563,7 +569,7 @@ struct bpf_verifier_state {
iter < frame->allocated_stack / BPF_REG_SIZE; \
iter++, reg = bpf_get_spilled_reg(iter, frame, mask))
-#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \
+#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __stack, __mask, __expr) \
({ \
struct bpf_verifier_state *___vstate = __vst; \
int ___i, ___j; \
@@ -571,6 +577,7 @@ struct bpf_verifier_state {
struct bpf_reg_state *___regs; \
__state = ___vstate->frame[___i]; \
___regs = __state->regs; \
+ __stack = NULL; \
for (___j = 0; ___j < MAX_BPF_REG; ___j++) { \
__reg = &___regs[___j]; \
(void)(__expr); \
@@ -578,14 +585,20 @@ struct bpf_verifier_state {
bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
if (!__reg) \
continue; \
+ __stack = &__state->stack[___j]; \
(void)(__expr); \
} \
} \
})
/* Invoke __expr over regsiters in __vst, setting __state and __reg */
-#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
- bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
+#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
+ ({ \
+ struct bpf_stack_state * ___stack; \
+ (void)___stack; \
+ bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, ___stack,\
+ 1 << STACK_SPILL, __expr); \
+ })
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
@@ -1381,6 +1394,7 @@ struct bpf_dynptr_desc {
enum bpf_dynptr_type type;
u32 id;
u32 ref_obj_id;
+ u32 parent_id;
};
struct bpf_kfunc_call_arg_meta {
@@ -1392,6 +1406,7 @@ struct bpf_kfunc_call_arg_meta {
const char *func_name;
/* Out parameters */
u32 ref_obj_id;
+ u32 id;
u8 release_regno;
bool r0_rdonly;
u32 ret_btf_id;
diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
index 64566b86dd27..8e35fc254060 100644
--- a/kernel/bpf/log.c
+++ b/kernel/bpf/log.c
@@ -667,6 +667,8 @@ static void print_reg_state(struct bpf_verifier_env *env,
verbose(env, "%+d", reg->delta);
if (reg->ref_obj_id)
verbose_a("ref_obj_id=%d", reg->ref_obj_id);
+ if (reg->parent_id)
+ verbose_a("parent_id=%d", reg->parent_id);
if (type_is_non_owning_ref(reg->type))
verbose_a("%s", "non_own_ref");
if (type_is_map_ptr(t)) {
@@ -769,9 +771,9 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifie
if (reg->id)
verbose_a("id=%d", reg->id);
if (reg->ref_obj_id)
- verbose_a("ref_id=%d", reg->ref_obj_id);
- if (reg->dynptr_id)
- verbose_a("dynptr_id=%d", reg->dynptr_id);
+ verbose_a("ref_obj_id=%d", reg->ref_obj_id);
+ if (reg->parent_id)
+ verbose_a("parent_id=%d", reg->parent_id);
verbose(env, ")");
break;
case STACK_ITER:
diff --git a/kernel/bpf/states.c b/kernel/bpf/states.c
index bd9c22945050..fa31ba80d534 100644
--- a/kernel/bpf/states.c
+++ b/kernel/bpf/states.c
@@ -489,7 +489,8 @@ static bool regs_exact(const struct bpf_reg_state *rold,
{
return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
check_ids(rold->id, rcur->id, idmap) &&
- check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
+ check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap) &&
+ check_ids(rold->parent_id, rcur->parent_id, idmap);
}
enum exact_level {
@@ -614,7 +615,8 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off) &&
check_ids(rold->id, rcur->id, idmap) &&
- check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
+ check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap) &&
+ check_ids(rold->parent_id, rcur->parent_id, idmap);
case PTR_TO_PACKET_META:
case PTR_TO_PACKET:
/* We must have at least as much range as the old ptr
@@ -794,7 +796,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
cur_reg = &cur->stack[spi].spilled_ptr;
if (old_reg->dynptr.type != cur_reg->dynptr.type ||
old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
- !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
+ !check_ids(old_reg->id, cur_reg->id, idmap) ||
+ !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap) ||
+ !check_ids(old_reg->parent_id, cur_reg->parent_id, idmap))
return false;
break;
case STACK_ITER:
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e1e95a066019..7eeb3d8fc817 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -202,7 +202,7 @@ struct bpf_verifier_stack_elem {
static int acquire_reference(struct bpf_verifier_env *env, int insn_idx);
static int release_reference_nomark(struct bpf_verifier_state *state, int ref_obj_id);
-static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
+static int release_reference(struct bpf_verifier_env *env, int id);
static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
static int ref_set_non_owning(struct bpf_verifier_env *env,
@@ -242,6 +242,7 @@ struct bpf_call_arg_meta {
int mem_size;
u64 msize_max_value;
int ref_obj_id;
+ u32 id;
int func_id;
struct btf *btf;
u32 btf_id;
@@ -634,14 +635,14 @@ static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type)
}
}
-static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
+static bool dynptr_type_referenced(enum bpf_dynptr_type type)
{
return type == BPF_DYNPTR_TYPE_RINGBUF || type == BPF_DYNPTR_TYPE_FILE;
}
static void __mark_dynptr_reg(struct bpf_reg_state *reg,
enum bpf_dynptr_type type,
- bool first_slot, int dynptr_id);
+ bool first_slot, int id);
static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
@@ -666,11 +667,12 @@ static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
struct bpf_func_state *state, int spi);
static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- enum bpf_arg_type arg_type, int insn_idx, int clone_ref_obj_id)
+ enum bpf_arg_type arg_type, int insn_idx, int parent_id,
+ struct bpf_dynptr_desc *dynptr)
{
struct bpf_func_state *state = bpf_func(env, reg);
+ int spi, i, err, ref_obj_id = 0;
enum bpf_dynptr_type type;
- int spi, i, err;
spi = dynptr_get_spi(env, reg);
if (spi < 0)
@@ -704,82 +706,56 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr,
&state->stack[spi - 1].spilled_ptr, type);
- if (dynptr_type_refcounted(type)) {
- /* The id is used to track proper releasing */
- int id;
-
- if (clone_ref_obj_id)
- id = clone_ref_obj_id;
- else
- id = acquire_reference(env, insn_idx);
-
- if (id < 0)
- return id;
-
- state->stack[spi].spilled_ptr.ref_obj_id = id;
- state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
+ if (dynptr->type == BPF_DYNPTR_TYPE_INVALID) { /* dynptr constructors */
+ if (dynptr_type_referenced(type)) {
+ ref_obj_id = acquire_reference(env, insn_idx);
+ if (ref_obj_id < 0)
+ return ref_obj_id;
+ }
+ /* Track parent's id if the parent is a referenced object */
+ } else { /* bpf_dynptr_clone() */
+ ref_obj_id = dynptr->ref_obj_id;
+ parent_id = dynptr->parent_id;
}
+ state->stack[spi].spilled_ptr.ref_obj_id = ref_obj_id;
+ state->stack[spi - 1].spilled_ptr.ref_obj_id = ref_obj_id;
+ state->stack[spi].spilled_ptr.parent_id = parent_id;
+ state->stack[spi - 1].spilled_ptr.parent_id = parent_id;
+
return 0;
}
-static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi)
+static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_stack_state *stack)
{
int i;
for (i = 0; i < BPF_REG_SIZE; i++) {
- state->stack[spi].slot_type[i] = STACK_INVALID;
- state->stack[spi - 1].slot_type[i] = STACK_INVALID;
+ stack[0].slot_type[i] = STACK_INVALID;
+ stack[1].slot_type[i] = STACK_INVALID;
}
- bpf_mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
- bpf_mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
+ bpf_mark_reg_not_init(env, &stack[0].spilled_ptr);
+ bpf_mark_reg_not_init(env, &stack[1].spilled_ptr);
}
static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
struct bpf_func_state *state = bpf_func(env, reg);
- int spi, ref_obj_id, i;
+ int spi;
spi = dynptr_get_spi(env, reg);
if (spi < 0)
return spi;
- if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
- invalidate_dynptr(env, state, spi);
- return 0;
- }
-
- ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id;
-
- /* If the dynptr has a ref_obj_id, then we need to invalidate
- * two things:
- *
- * 1) Any dynptrs with a matching ref_obj_id (clones)
- * 2) Any slices derived from this dynptr.
+ /*
+ * For referenced dynptr, the clones share the same ref_obj_id and will be
+ * invalidated too. For non-referenced dynptr, only the dynptr and slices
+ * derived from it will be invalidated.
*/
-
- /* Invalidate any slices associated with this dynptr */
- WARN_ON_ONCE(release_reference(env, ref_obj_id));
-
- /* Invalidate any dynptr clones */
- for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id)
- continue;
-
- /* it should always be the case that if the ref obj id
- * matches then the stack slot also belongs to a
- * dynptr
- */
- if (state->stack[i].slot_type[0] != STACK_DYNPTR) {
- verifier_bug(env, "misconfigured ref_obj_id");
- return -EFAULT;
- }
- if (state->stack[i].spilled_ptr.dynptr.first_slot)
- invalidate_dynptr(env, state, i);
- }
-
- return 0;
+ reg = &state->stack[spi].spilled_ptr;
+ return release_reference(env, dynptr_type_referenced(reg->dynptr.type) ?
+ reg->ref_obj_id : reg->id);
}
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
@@ -793,13 +769,25 @@ static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_
__mark_reg_unknown(env, reg);
}
+static int dynptr_get_refcnt(struct bpf_func_state *state, u32 ref_obj_id)
+{
+ int i, ref_cnt = 0;
+
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+ if (state->stack[i].slot_type[0] != STACK_DYNPTR)
+ continue;
+ if (!state->stack[i].spilled_ptr.dynptr.first_slot)
+ continue;
+ if (state->stack[i].spilled_ptr.ref_obj_id == ref_obj_id)
+ ref_cnt++;
+ }
+
+ return ref_cnt;
+}
+
static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
struct bpf_func_state *state, int spi)
{
- struct bpf_func_state *fstate;
- struct bpf_reg_state *dreg;
- int i, dynptr_id;
-
/* We always ensure that STACK_DYNPTR is never set partially,
* hence just checking for slot_type[0] is enough. This is
* different for STACK_SPILL, where it may be only set for
@@ -812,56 +800,11 @@ static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
spi = spi + 1;
- if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
- int ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id;
- int ref_cnt = 0;
-
- /*
- * A referenced dynptr can be overwritten only if there is at
- * least one other dynptr sharing the same ref_obj_id,
- * ensuring the reference can still be properly released.
- */
- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].slot_type[0] != STACK_DYNPTR)
- continue;
- if (!state->stack[i].spilled_ptr.dynptr.first_slot)
- continue;
- if (state->stack[i].spilled_ptr.ref_obj_id == ref_obj_id)
- ref_cnt++;
- }
-
- if (ref_cnt <= 1) {
- verbose(env, "cannot overwrite referenced dynptr\n");
- return -EINVAL;
- }
- }
-
mark_stack_slot_scratched(env, spi);
mark_stack_slot_scratched(env, spi - 1);
- /* Writing partially to one dynptr stack slot destroys both. */
- for (i = 0; i < BPF_REG_SIZE; i++) {
- state->stack[spi].slot_type[i] = STACK_INVALID;
- state->stack[spi - 1].slot_type[i] = STACK_INVALID;
- }
-
- dynptr_id = state->stack[spi].spilled_ptr.id;
- /* Invalidate any slices associated with this dynptr */
- bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({
- /* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */
- if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM)
- continue;
- if (dreg->dynptr_id == dynptr_id)
- mark_reg_invalid(env, dreg);
- }));
-
- /* Do not release reference state, we are destroying dynptr on stack,
- * not using some helper to release it. Just reset register.
- */
- bpf_mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
- bpf_mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
-
- return 0;
+ /* Invalidate the dynptr and any derived slices */
+ return release_reference(env, state->stack[spi].spilled_ptr.id);
}
static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
@@ -1480,15 +1423,15 @@ static void release_reference_state(struct bpf_verifier_state *state, int idx)
return;
}
-static bool find_reference_state(struct bpf_verifier_state *state, int ptr_id)
+static struct bpf_reference_state *find_reference_state(struct bpf_verifier_state *state, int ptr_id)
{
int i;
for (i = 0; i < state->acquired_refs; i++)
if (state->refs[i].id == ptr_id)
- return true;
+ return &state->refs[i];
- return false;
+ return NULL;
}
static int release_lock_state(struct bpf_verifier_state *state, int type, int id, void *ptr)
@@ -1804,6 +1747,7 @@ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
reg->id = 0;
reg->ref_obj_id = 0;
+ reg->parent_id = 0;
___mark_reg_known(reg, imm);
}
@@ -1838,7 +1782,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env,
}
static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
- bool first_slot, int dynptr_id)
+ bool first_slot, int id)
{
/* reg->type has no meaning for STACK_DYNPTR, but when we set reg for
* callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply
@@ -1847,7 +1791,7 @@ static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type ty
__mark_reg_known_zero(reg);
reg->type = CONST_PTR_TO_DYNPTR;
/* Give each dynptr a unique id to uniquely associate slices to it. */
- reg->id = dynptr_id;
+ reg->id = id;
reg->dynptr.type = type;
reg->dynptr.first_slot = first_slot;
}
@@ -2127,17 +2071,12 @@ static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
/* Mark a register as having a completely unknown (scalar) value. */
void bpf_mark_reg_unknown_imprecise(struct bpf_reg_state *reg)
{
- /*
- * Clear type, off, and union(map_ptr, range) and
- * padding between 'type' and union
- */
- memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
+ s32 subreg_def = reg->subreg_def;
+
+ memset(reg, 0, sizeof(*reg));
reg->type = SCALAR_VALUE;
- reg->id = 0;
- reg->ref_obj_id = 0;
reg->var_off = tnum_unknown;
- reg->frameno = 0;
- reg->precise = false;
+ reg->subreg_def = subreg_def;
__mark_reg_unbounded(reg);
}
@@ -7018,7 +6957,16 @@ static int process_kptr_func(struct bpf_verifier_env *env, int regno,
return 0;
}
-/* There are two register types representing a bpf_dynptr, one is PTR_TO_STACK
+/*
+ * Validate dynptr arguments for helper, kfunc and subprog.
+ *
+ * @dynptr is both input and output. It is populated when the argument is
+ * tagged with MEM_UNINIT (i.e., the dynptr argument that will be constructed)
+ * and consumed when the argument is expecting to be an initialized dynptr.
+ * @parent_id is used to track the referenced parent object (e.g., file or skb in
+ * qdisc program) when constructing a dynptr.
+ *
+ * There are two register types representing a bpf_dynptr, one is PTR_TO_STACK
* which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR.
*
* In both cases we deal with the first 8 bytes, but need to mark the next 8
@@ -7034,7 +6982,7 @@ static int process_kptr_func(struct bpf_verifier_env *env, int regno,
*/
static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
argno_t argno, int insn_idx, enum bpf_arg_type arg_type,
- int clone_ref_obj_id, struct bpf_dynptr_desc *dynptr)
+ int parent_id, struct bpf_dynptr_desc *dynptr)
{
int spi, err = 0;
@@ -7075,7 +7023,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_stat
return err;
}
- err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, clone_ref_obj_id);
+ err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, parent_id, dynptr);
} else /* OBJ_RELEASE and None case from above */ {
/* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
if (reg->type == CONST_PTR_TO_DYNPTR && (arg_type & OBJ_RELEASE)) {
@@ -7118,6 +7066,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_stat
dynptr->type = reg->dynptr.type;
dynptr->id = reg->id;
dynptr->ref_obj_id = reg->ref_obj_id;
+ dynptr->parent_id = reg->parent_id;
}
}
return err;
@@ -8077,7 +8026,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
*/
if (reg->type == PTR_TO_STACK) {
spi = dynptr_get_spi(env, reg);
- if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) {
+ if (spi < 0 || !state->stack[spi].spilled_ptr.id) {
verbose(env, "arg %d is an unacquired reference\n", regno);
return -EINVAL;
}
@@ -8105,6 +8054,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
return -EACCES;
}
meta->ref_obj_id = reg->ref_obj_id;
+ meta->id = reg->id;
}
switch (base_type(arg_type)) {
@@ -8730,26 +8680,87 @@ static int release_reference_nomark(struct bpf_verifier_state *state, int ref_ob
return -EINVAL;
}
-/* The pointer with the specified id has released its reference to kernel
- * resources. Identify all copies of the same pointer and clear the reference.
- *
- * This is the release function corresponding to acquire_reference(). Idempotent.
- */
-static int release_reference(struct bpf_verifier_env *env, int ref_obj_id)
+static int idstack_push(struct bpf_idmap *idmap, u32 id)
+{
+ int i;
+
+ if (!id)
+ return 0;
+
+ for (i = 0; i < idmap->cnt; i++)
+ if (idmap->map[i].old == id)
+ return 0;
+
+ if (WARN_ON_ONCE(idmap->cnt >= BPF_ID_MAP_SIZE))
+ return -EFAULT;
+
+ idmap->map[idmap->cnt++].old = id;
+ return 0;
+}
+
+static int idstack_pop(struct bpf_idmap *idmap)
{
+ if (!idmap->cnt)
+ return 0;
+
+ return idmap->map[--idmap->cnt].old;
+}
+
+/* Release id and objects referencing the id iteratively in a DFS manner */
+static int release_reference(struct bpf_verifier_env *env, int id)
+{
+ u32 mask = (1 << STACK_SPILL) | (1 << STACK_DYNPTR);
struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_idmap *idstack = &env->idmap_scratch;
+ struct bpf_stack_state *stack;
struct bpf_func_state *state;
struct bpf_reg_state *reg;
- int err;
+ int root_id = id, err;
- err = release_reference_nomark(vstate, ref_obj_id);
- if (err)
- return err;
+ idstack->cnt = 0;
+ idstack_push(idstack, id);
- bpf_for_each_reg_in_vstate(vstate, state, reg, ({
- if (reg->ref_obj_id == ref_obj_id)
- mark_reg_invalid(env, reg);
- }));
+ if (find_reference_state(vstate, id))
+ WARN_ON_ONCE(release_reference_nomark(vstate, id));
+
+ while ((id = idstack_pop(idstack))) {
+ bpf_for_each_reg_in_vstate_mask(vstate, state, reg, stack, mask, ({
+ int ref_obj_cnt = 1;
+
+ if (reg->id != id && reg->parent_id != id && reg->ref_obj_id != id)
+ continue;
+
+ /*
+ * A referenced dynptr can be overwritten only if there is at
+ * least one other dynptr sharing the same ref_obj_id,
+ * ensuring the reference can still be properly released.
+ */
+ if (stack && stack->slot_type[BPF_REG_SIZE - 1] == STACK_DYNPTR &&
+ dynptr_type_referenced(reg->dynptr.type))
+ ref_obj_cnt = dynptr_get_refcnt(state, reg->ref_obj_id);
+
+ if (reg->ref_obj_id && reg->ref_obj_id != root_id && ref_obj_cnt <= 1) {
+ struct bpf_reference_state *ref_state;
+
+ ref_state = find_reference_state(env->cur_state, reg->ref_obj_id);
+ verbose(env, "Leaking reference id=%d alloc_insn=%d. Release it first.\n",
+ ref_state->id, ref_state->insn_idx);
+ return -EINVAL;
+ }
+
+ /* Free objects derived from the current object */
+ if (reg->id != id) {
+ err = idstack_push(idstack, reg->id);
+ if (err)
+ return err;
+ }
+
+ if (!stack || stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL)
+ mark_reg_invalid(env, reg);
+ else if (stack->slot_type[BPF_REG_SIZE - 1] == STACK_DYNPTR)
+ invalidate_dynptr(env, stack);
+ }));
+ }
return 0;
}
@@ -9914,11 +9925,8 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
*/
err = 0;
}
- if (err) {
- verbose(env, "func %s#%d reference has not been acquired before\n",
- func_id_name(func_id), func_id);
+ if (err)
return err;
- }
}
switch (func_id) {
@@ -10196,10 +10204,8 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
regs[BPF_REG_0].ref_obj_id = id;
}
- if (func_id == BPF_FUNC_dynptr_data) {
- regs[BPF_REG_0].dynptr_id = meta.dynptr.id;
- regs[BPF_REG_0].ref_obj_id = meta.dynptr.ref_obj_id;
- }
+ if (func_id == BPF_FUNC_dynptr_data)
+ regs[BPF_REG_0].parent_id = meta.dynptr.id;
err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta);
if (err)
@@ -11636,6 +11642,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return -EFAULT;
}
meta->ref_obj_id = reg->ref_obj_id;
+ meta->id = reg->id;
if (is_kfunc_release(meta))
meta->release_regno = regno;
}
@@ -11778,7 +11785,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
case KF_ARG_PTR_TO_DYNPTR:
{
enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR;
- int clone_ref_obj_id = 0;
if (is_kfunc_arg_uninit(btf, &args[i]))
dynptr_arg_type |= MEM_UNINIT;
@@ -11804,15 +11810,10 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
dynptr_arg_type |= (unsigned int)get_dynptr_type_flag(parent_type);
- clone_ref_obj_id = meta->dynptr.ref_obj_id;
- if (dynptr_type_refcounted(parent_type) && !clone_ref_obj_id) {
- verifier_bug(env, "missing ref obj id for parent of clone");
- return -EFAULT;
- }
}
ret = process_dynptr_func(env, reg, argno, insn_idx, dynptr_arg_type,
- clone_ref_obj_id, &meta->dynptr);
+ meta->ref_obj_id ? meta->id : 0, &meta->dynptr);
if (ret < 0)
return ret;
break;
@@ -12465,12 +12466,7 @@ static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_ca
verifier_bug(env, "no dynptr id");
return -EFAULT;
}
- regs[BPF_REG_0].dynptr_id = meta->dynptr.id;
-
- /* we don't need to set BPF_REG_0's ref obj id
- * because packet slices are not refcounted (see
- * dynptr_type_refcounted)
- */
+ regs[BPF_REG_0].parent_id = meta->dynptr.id;
} else {
return 0;
}
@@ -12605,6 +12601,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (rcu_lock) {
env->cur_state->active_rcu_locks++;
} else if (rcu_unlock) {
+ struct bpf_stack_state *stack;
struct bpf_func_state *state;
struct bpf_reg_state *reg;
u32 clear_mask = (1 << STACK_SPILL) | (1 << STACK_ITER);
@@ -12614,7 +12611,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return -EINVAL;
}
if (--env->cur_state->active_rcu_locks == 0) {
- bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, clear_mask, ({
+ bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, stack, clear_mask, ({
if (reg->type & MEM_RCU) {
reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL);
reg->type |= PTR_UNTRUSTED;
@@ -12657,9 +12654,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
err = unmark_stack_slots_dynptr(env, reg);
} else {
err = release_reference(env, reg->ref_obj_id);
- if (err)
- verbose(env, "kfunc %s#%d reference has not been acquired before\n",
- func_name, meta.func_id);
}
if (err)
return err;
@@ -12676,7 +12670,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return err;
}
- err = release_reference(env, release_ref_obj_id);
+ err = release_reference_nomark(env->cur_state, release_ref_obj_id);
if (err) {
verbose(env, "kfunc %s#%d reference has not been acquired before\n",
func_name, meta.func_id);
@@ -12766,7 +12760,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
/* Ensures we don't access the memory after a release_reference() */
if (meta.ref_obj_id)
- regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
+ regs[BPF_REG_0].parent_id = meta.ref_obj_id;
if (is_kfunc_rcu_protected(&meta))
regs[BPF_REG_0].type |= MEM_RCU;
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index dbd97add5a5a..b141cf75cc6a 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -471,7 +471,7 @@ int invalid_write1(void *ctx)
* offset
*/
SEC("?raw_tp")
-__failure __msg("cannot overwrite referenced dynptr")
+__failure __msg("Leaking reference id={{[0-9]+}} alloc_insn={{[0-9]+}}. Release it first.")
int invalid_write2(void *ctx)
{
struct bpf_dynptr ptr;
@@ -495,7 +495,7 @@ int invalid_write2(void *ctx)
* non-const offset
*/
SEC("?raw_tp")
-__failure __msg("cannot overwrite referenced dynptr")
+__failure __msg("Leaking reference id={{[0-9]+}} alloc_insn={{[0-9]+}}. Release it first.")
int invalid_write3(void *ctx)
{
struct bpf_dynptr ptr;
@@ -527,7 +527,7 @@ static int invalid_write4_callback(__u32 index, void *data)
* be invalidated as a dynptr
*/
SEC("?raw_tp")
-__failure __msg("cannot overwrite referenced dynptr")
+__failure __msg("Leaking reference id={{[0-9]+}} alloc_insn={{[0-9]+}}. Release it first.")
int invalid_write4(void *ctx)
{
struct bpf_dynptr ptr;
@@ -706,7 +706,7 @@ int dynptr_from_mem_invalid_api(void *ctx)
}
SEC("?tc")
-__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+__failure __msg("Leaking reference id={{[0-9]+}} alloc_insn={{[0-9]+}}. Release it first.") __log_level(2)
int dynptr_pruning_overwrite(struct __sk_buff *ctx)
{
asm volatile (
@@ -768,7 +768,7 @@ int dynptr_pruning_stacksafe(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+__failure __msg("Leaking reference id={{[0-9]+}} alloc_insn={{[0-9]+}}. Release it first.") __log_level(2)
int dynptr_pruning_type_confusion(struct __sk_buff *ctx)
{
asm volatile (
@@ -880,7 +880,7 @@ int dynptr_var_off_overwrite(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+__failure __msg("Leaking reference id={{[0-9]+}} alloc_insn={{[0-9]+}}. Release it first.") __log_level(2)
int dynptr_partial_slot_invalidate(struct __sk_buff *ctx)
{
asm volatile (
@@ -1056,7 +1056,7 @@ int dynptr_invalidate_slice_success(void *ctx)
/* Overwriting referenced dynptr should be rejected */
SEC("?raw_tp")
-__failure __msg("cannot overwrite referenced dynptr")
+__failure __msg("Leaking reference id={{[0-9]+}} alloc_insn={{[0-9]+}}. Release it first.")
int dynptr_overwrite_ref(void *ctx)
{
struct bpf_dynptr ptr;
@@ -1708,7 +1708,7 @@ int clone_invalid1(void *ctx)
/* Can't overwrite an existing dynptr when cloning */
SEC("?xdp")
-__failure __msg("cannot overwrite referenced dynptr")
+__failure __msg("Leaking reference id={{[0-9]+}} alloc_insn={{[0-9]+}}. Release it first.")
int clone_invalid2(struct xdp_md *xdp)
{
struct bpf_dynptr ptr1;
@@ -2015,7 +2015,7 @@ int dynptr_overwrite_ref_with_clone(void *ctx)
/* Overwriting the last referenced dynptr should still be rejected */
SEC("?raw_tp")
-__failure __msg("cannot overwrite referenced dynptr")
+__failure __msg("Leaking reference id={{[0-9]+}} alloc_insn={{[0-9]+}}. Release it first.")
int dynptr_overwrite_ref_last_clone(void *ctx)
{
struct bpf_dynptr ptr, clone;
--
2.52.0
^ permalink raw reply related [flat|nested] 22+ messages in thread* Re: [PATCH bpf-next v4 05/12] bpf: Refactor object relationship tracking and fix dynptr UAF bug
2026-05-06 14:27 ` [PATCH bpf-next v4 05/12] bpf: Refactor object relationship tracking and fix dynptr UAF bug Amery Hung
@ 2026-05-06 15:27 ` bot+bpf-ci
2026-05-07 12:20 ` Amery Hung
0 siblings, 1 reply; 22+ messages in thread
From: bot+bpf-ci @ 2026-05-06 15:27 UTC (permalink / raw)
To: ameryhung, bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team, ast, andrii,
daniel, martin.lau, eddyz87, yonghong.song, clm, ihor.solodrai
[-- Attachment #1: Type: text/plain, Size: 1309 bytes --]
> diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
> index f330e9cf297e..51d5f5dd6e5b 100644
> --- a/include/linux/bpf_verifier.h
> +++ b/include/linux/bpf_verifier.h
[ ... ]
> @@ -1381,6 +1394,7 @@ struct bpf_dynptr_desc {
> enum bpf_dynptr_type type;
> u32 id;
> u32 ref_obj_id;
> + u32 parent_id;
> };
Eduard Zingerman raised a suggestion in v3 about adding documentation
comments to struct bpf_dynptr_desc. The structure still lacks comments
explaining when the id, ref_obj_id, and parent_id fields are populated.
Would it help readers to document when these fields are set?
>
> struct bpf_kfunc_call_arg_meta {
> @@ -1392,6 +1406,7 @@ struct bpf_kfunc_call_arg_meta {
> const char *func_name;
> /* Out parameters */
> u32 ref_obj_id;
> + u32 id;
> u8 release_regno;
> bool r0_rdonly;
> u32 ret_btf_id;
Eduard Zingerman suggested in v3 adding a comment for the new id field
in struct bpf_kfunc_call_arg_meta. The field remains undocumented. Would
a comment clarifying its purpose improve code readability?
[ ... ]
---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/25442456702
^ permalink raw reply [flat|nested] 22+ messages in thread* Re: [PATCH bpf-next v4 05/12] bpf: Refactor object relationship tracking and fix dynptr UAF bug
2026-05-06 15:27 ` bot+bpf-ci
@ 2026-05-07 12:20 ` Amery Hung
0 siblings, 0 replies; 22+ messages in thread
From: Amery Hung @ 2026-05-07 12:20 UTC (permalink / raw)
To: bot+bpf-ci
Cc: bpf, netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, kernel-team, ast, yonghong.song,
clm, ihor.solodrai
On Wed, May 6, 2026 at 4:27 PM <bot+bpf-ci@kernel.org> wrote:
>
> > diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
> > index f330e9cf297e..51d5f5dd6e5b 100644
> > --- a/include/linux/bpf_verifier.h
> > +++ b/include/linux/bpf_verifier.h
>
> [ ... ]
>
> > @@ -1381,6 +1394,7 @@ struct bpf_dynptr_desc {
> > enum bpf_dynptr_type type;
> > u32 id;
> > u32 ref_obj_id;
> > + u32 parent_id;
> > };
>
> Eduard Zingerman raised a suggestion in v3 about adding documentation
> comments to struct bpf_dynptr_desc. The structure still lacks comments
> explaining when the id, ref_obj_id, and parent_id fields are populated.
> Would it help readers to document when these fields are set?
>
> >
> > struct bpf_kfunc_call_arg_meta {
> > @@ -1392,6 +1406,7 @@ struct bpf_kfunc_call_arg_meta {
> > const char *func_name;
> > /* Out parameters */
> > u32 ref_obj_id;
> > + u32 id;
> > u8 release_regno;
> > bool r0_rdonly;
> > u32 ret_btf_id;
>
> Eduard Zingerman suggested in v3 adding a comment for the new id field
> in struct bpf_kfunc_call_arg_meta. The field remains undocumented. Would
> a comment clarifying its purpose improve code readability?
The readability should be improved in patch 7 where ref_obj_id and id
are moved into ref_obj_desc.
For the meaning of id and ref_obj_id, we should keep it in where they
are defined: bpf_verifier.h
>
> [ ... ]
>
>
> ---
> AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
> See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
>
> CI run summary: https://github.com/kernel-patches/bpf/actions/runs/25442456702
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH bpf-next v4 06/12] bpf: Remove redundant dynptr arg check for helper
2026-05-06 14:26 [PATCH bpf-next v4 00/12] Refactor verifier object relationship tracking Amery Hung
` (4 preceding siblings ...)
2026-05-06 14:27 ` [PATCH bpf-next v4 05/12] bpf: Refactor object relationship tracking and fix dynptr UAF bug Amery Hung
@ 2026-05-06 14:27 ` Amery Hung
2026-05-06 14:27 ` [PATCH bpf-next v4 07/12] bpf: Unify referenced object tracking in verifier Amery Hung
` (5 subsequent siblings)
11 siblings, 0 replies; 22+ messages in thread
From: Amery Hung @ 2026-05-06 14:27 UTC (permalink / raw)
To: bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team
unmark_stack_slots_dynptr() already makes sure that CONST_PTR_TO_DYNPTR
cannot be released. process_dynptr_func() also prevents passing
uninitialized dynptr to helpers expecting initialized dynptr. Now that
unmark_stack_slots_dynptr() also propagates the error returned from
release_reference(), there should be no reason to keep these redundant
checks.
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
kernel/bpf/verifier.c | 21 +------------------
.../testing/selftests/bpf/progs/dynptr_fail.c | 6 +++---
.../selftests/bpf/progs/user_ringbuf_fail.c | 4 ++--
3 files changed, 6 insertions(+), 25 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 7eeb3d8fc817..792e61e07b06 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -8015,26 +8015,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
skip_type_check:
if (arg_type_is_release(arg_type)) {
- if (arg_type_is_dynptr(arg_type)) {
- struct bpf_func_state *state = bpf_func(env, reg);
- int spi;
-
- /* Only dynptr created on stack can be released, thus
- * the get_spi and stack state checks for spilled_ptr
- * should only be done before process_dynptr_func for
- * PTR_TO_STACK.
- */
- if (reg->type == PTR_TO_STACK) {
- spi = dynptr_get_spi(env, reg);
- if (spi < 0 || !state->stack[spi].spilled_ptr.id) {
- verbose(env, "arg %d is an unacquired reference\n", regno);
- return -EINVAL;
- }
- } else {
- verbose(env, "cannot release unowned const bpf_dynptr\n");
- return -EINVAL;
- }
- } else if (!reg->ref_obj_id && !bpf_register_is_null(reg)) {
+ if (!arg_type_is_dynptr(arg_type) && !reg->ref_obj_id && !bpf_register_is_null(reg)) {
verbose(env, "R%d must be referenced when passed to release function\n",
regno);
return -EINVAL;
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index b141cf75cc6a..60324d68b349 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -136,7 +136,7 @@ int ringbuf_missing_release_callback(void *ctx)
/* Can't call bpf_ringbuf_submit/discard_dynptr on a non-initialized dynptr */
SEC("?raw_tp")
-__failure __msg("arg 1 is an unacquired reference")
+__failure __msg("Expected an initialized dynptr as R1")
int ringbuf_release_uninit_dynptr(void *ctx)
{
struct bpf_dynptr ptr;
@@ -650,7 +650,7 @@ int invalid_offset(void *ctx)
/* Can't release a dynptr twice */
SEC("?raw_tp")
-__failure __msg("arg 1 is an unacquired reference")
+__failure __msg("Expected an initialized dynptr as R1")
int release_twice(void *ctx)
{
struct bpf_dynptr ptr;
@@ -677,7 +677,7 @@ static int release_twice_callback_fn(__u32 index, void *data)
* within a callback function, fails
*/
SEC("?raw_tp")
-__failure __msg("arg 1 is an unacquired reference")
+__failure __msg("Expected an initialized dynptr as R1")
int release_twice_callback(void *ctx)
{
struct bpf_dynptr ptr;
diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
index 54de0389f878..c0d0422b8030 100644
--- a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
+++ b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
@@ -146,7 +146,7 @@ try_discard_dynptr(struct bpf_dynptr *dynptr, void *context)
* not be able to read past the end of the pointer.
*/
SEC("?raw_tp")
-__failure __msg("cannot release unowned const bpf_dynptr")
+__failure __msg("CONST_PTR_TO_DYNPTR cannot be released")
int user_ringbuf_callback_discard_dynptr(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, try_discard_dynptr, NULL, 0);
@@ -166,7 +166,7 @@ try_submit_dynptr(struct bpf_dynptr *dynptr, void *context)
* not be able to read past the end of the pointer.
*/
SEC("?raw_tp")
-__failure __msg("cannot release unowned const bpf_dynptr")
+__failure __msg("CONST_PTR_TO_DYNPTR cannot be released")
int user_ringbuf_callback_submit_dynptr(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, try_submit_dynptr, NULL, 0);
--
2.52.0
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH bpf-next v4 07/12] bpf: Unify referenced object tracking in verifier
2026-05-06 14:26 [PATCH bpf-next v4 00/12] Refactor verifier object relationship tracking Amery Hung
` (5 preceding siblings ...)
2026-05-06 14:27 ` [PATCH bpf-next v4 06/12] bpf: Remove redundant dynptr arg check for helper Amery Hung
@ 2026-05-06 14:27 ` Amery Hung
2026-05-06 14:27 ` [PATCH bpf-next v4 08/12] bpf: Unify release handling for helpers and kfuncs Amery Hung
` (4 subsequent siblings)
11 siblings, 0 replies; 22+ messages in thread
From: Amery Hung @ 2026-05-06 14:27 UTC (permalink / raw)
To: bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team
Helpers and kfuncs independently tracked referenced object metadata (id,
ref_obj_id) using separate fields on their respective arg_meta structs.
This led to duplicated logic and inconsistent error handling between the
two paths.
Introduce struct ref_obj_desc to consolidate these fields along with a
count of how many arguments carry a reference. Add update_ref_obj() to
populate it from a bpf_reg_state, replacing open-coded assignments in
check_func_arg(), check_kfunc_args(), and process_iter_arg().
Both helper and kfunc now use ref_obj.cnt to detect ambiguous ref_obj
args. For ref_obj releasing helpers and kfuncs, keep checking it before
calling update_ref_obj() for now. A later patch will make these
functions not depending on ref_obj. For other users of ref_obj, move the
checks to the use locations. For helper, this means moving the checks
inside helper_multiple_ref_obj_use() to use locations.
is_acquire_function() is dropped as ref_obj is never used.
Pass ref_obj_desc into process_dynptr_func()/mark_stack_slots_dynptr()
instead of a bare parent_id to make it less confusing.
Drop the selftest introduced in 7ec899ac90a2 (“selftests/bpf: Negative
test case for ref_obj_id in args”) since the verifier no longer
complains about ambiguous ref_obj if it is not used.
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
include/linux/bpf_verifier.h | 15 ++-
kernel/bpf/verifier.c | 111 +++++++++----------
tools/testing/selftests/bpf/verifier/calls.c | 24 ----
3 files changed, 68 insertions(+), 82 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 51d5f5dd6e5b..a531be98fedf 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -1397,6 +1397,18 @@ struct bpf_dynptr_desc {
u32 parent_id;
};
+/*
+ * The last seen referenced object; updated by update_ref_obj() when a register refers to a
+ * referenced object. Used when the helper or kfunc is releasing a referenced object, casting
+ * a referenced object, returning allocated memory derived from referenced object or creating
+ * a dynptr with a referenced object as parent.
+ */
+struct ref_obj_desc {
+ u32 id;
+ u32 ref_obj_id;
+ u8 cnt;
+};
+
struct bpf_kfunc_call_arg_meta {
/* In parameters */
struct btf *btf;
@@ -1405,8 +1417,6 @@ struct bpf_kfunc_call_arg_meta {
const struct btf_type *func_proto;
const char *func_name;
/* Out parameters */
- u32 ref_obj_id;
- u32 id;
u8 release_regno;
bool r0_rdonly;
u32 ret_btf_id;
@@ -1444,6 +1454,7 @@ struct bpf_kfunc_call_arg_meta {
} iter;
struct bpf_map_desc map;
struct bpf_dynptr_desc dynptr;
+ struct ref_obj_desc ref_obj;
u64 mem_size;
};
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 792e61e07b06..542912c7983f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -234,6 +234,7 @@ static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
struct bpf_call_arg_meta {
struct bpf_map_desc map;
struct bpf_dynptr_desc dynptr;
+ struct ref_obj_desc ref_obj;
bool raw_mode;
bool pkt_access;
u8 release_regno;
@@ -525,20 +526,6 @@ bool bpf_is_may_goto_insn(struct bpf_insn *insn)
return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO;
}
-static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
- const struct bpf_map *map)
-{
- int ref_obj_uses = 0;
-
- if (is_ptr_cast_function(func_id))
- ref_obj_uses++;
- if (is_acquire_function(func_id, map))
- ref_obj_uses++;
-
- return ref_obj_uses > 1;
-}
-
-
static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
{
int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
@@ -667,11 +654,11 @@ static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
struct bpf_func_state *state, int spi);
static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- enum bpf_arg_type arg_type, int insn_idx, int parent_id,
- struct bpf_dynptr_desc *dynptr)
+ enum bpf_arg_type arg_type, int insn_idx,
+ struct ref_obj_desc *ref_obj, struct bpf_dynptr_desc *dynptr)
{
struct bpf_func_state *state = bpf_func(env, reg);
- int spi, i, err, ref_obj_id = 0;
+ int spi, i, err, ref_obj_id = 0, parent_id = 0;
enum bpf_dynptr_type type;
spi = dynptr_get_spi(env, reg);
@@ -713,6 +700,13 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
return ref_obj_id;
}
/* Track parent's id if the parent is a referenced object */
+ if (ref_obj && ref_obj->ref_obj_id) {
+ if (ref_obj->cnt > 1) {
+ verifier_bug(env, "function expects only one referenced object but got %d\n", ref_obj->cnt);
+ return -EFAULT;
+ }
+ parent_id = ref_obj->id;
+ }
} else { /* bpf_dynptr_clone() */
ref_obj_id = dynptr->ref_obj_id;
parent_id = dynptr->parent_id;
@@ -6982,7 +6976,7 @@ static int process_kptr_func(struct bpf_verifier_env *env, int regno,
*/
static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
argno_t argno, int insn_idx, enum bpf_arg_type arg_type,
- int parent_id, struct bpf_dynptr_desc *dynptr)
+ struct ref_obj_desc *ref_obj, struct bpf_dynptr_desc *dynptr)
{
int spi, err = 0;
@@ -7023,7 +7017,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_stat
return err;
}
- err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, parent_id, dynptr);
+ err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, ref_obj, dynptr);
} else /* OBJ_RELEASE and None case from above */ {
/* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
if (reg->type == CONST_PTR_TO_DYNPTR && (arg_type & OBJ_RELEASE)) {
@@ -7072,13 +7066,6 @@ static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_stat
return err;
}
-static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi)
-{
- struct bpf_func_state *state = bpf_func(env, reg);
-
- return state->stack[spi].spilled_ptr.ref_obj_id;
-}
-
static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{
return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
@@ -7108,9 +7095,17 @@ static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg_idx,
return btf_param_match_suffix(meta->btf, arg, "__iter");
}
+static void update_ref_obj(struct ref_obj_desc *ref_obj, struct bpf_reg_state *reg)
+{
+ ref_obj->id = reg->id;
+ ref_obj->ref_obj_id = reg->ref_obj_id;
+ ref_obj->cnt++;
+}
+
static int process_iter_arg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, argno_t argno, int insn_idx,
struct bpf_kfunc_call_arg_meta *meta)
{
+ struct bpf_func_state *state = bpf_func(env, reg);
const struct btf_type *t;
u32 arg_idx = arg_from_argno(argno) - 1;
int spi, err, i, nr_slots, btf_id;
@@ -7182,7 +7177,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, struct bpf_reg_state *
/* remember meta->iter info for process_iter_next_call() */
meta->iter.spi = spi;
meta->iter.frameno = reg->frameno;
- meta->ref_obj_id = iter_ref_obj_id(env, reg, spi);
+ update_ref_obj(&meta->ref_obj, &state->stack[spi].spilled_ptr);
if (is_iter_destroy_kfunc(meta)) {
err = unmark_stack_slots_iter(env, reg, nr_slots);
@@ -7961,6 +7956,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
u32 regno = BPF_REG_1 + arg;
struct bpf_reg_state *reg = reg_state(env, regno);
enum bpf_arg_type arg_type = fn->arg_type[arg];
+ argno_t argno = argno_from_arg(arg + 1);
enum bpf_reg_type type = reg->type;
u32 *arg_btf_id = NULL;
u32 key_size;
@@ -8028,14 +8024,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
}
if (reg->ref_obj_id && base_type(arg_type) != ARG_KPTR_XCHG_DEST) {
- if (meta->ref_obj_id) {
- verbose(env, "more than one arg with ref_obj_id R%d %u %u",
- regno, reg->ref_obj_id,
- meta->ref_obj_id);
+ if (meta->release_regno && meta->ref_obj.cnt) {
+ verbose(env, "more than one arg with ref_obj_id %s %u %u",
+ reg_arg_name(env, argno), reg->ref_obj_id,
+ meta->ref_obj.ref_obj_id);
return -EACCES;
}
- meta->ref_obj_id = reg->ref_obj_id;
- meta->id = reg->id;
+ update_ref_obj(&meta->ref_obj, reg);
}
switch (base_type(arg_type)) {
@@ -8175,7 +8170,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
true, meta);
break;
case ARG_PTR_TO_DYNPTR:
- err = process_dynptr_func(env, reg, argno_from_reg(regno), insn_idx, arg_type, 0,
+ err = process_dynptr_func(env, reg, argno_from_reg(regno), insn_idx, arg_type, NULL,
&meta->dynptr);
if (err)
return err;
@@ -8900,7 +8895,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
if (ret)
return ret;
- ret = process_dynptr_func(env, reg, argno, -1, arg->arg_type, 0, NULL);
+ ret = process_dynptr_func(env, reg, argno, -1, arg->arg_type, NULL, NULL);
if (ret)
return ret;
} else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
@@ -9878,8 +9873,8 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
err = -EINVAL;
if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) {
err = unmark_stack_slots_dynptr(env, ®s[meta.release_regno]);
- } else if (func_id == BPF_FUNC_kptr_xchg && meta.ref_obj_id) {
- u32 ref_obj_id = meta.ref_obj_id;
+ } else if (func_id == BPF_FUNC_kptr_xchg && meta.ref_obj.ref_obj_id) {
+ u32 ref_obj_id = meta.ref_obj.ref_obj_id;
bool in_rcu = in_rcu_cs(env);
struct bpf_func_state *state;
struct bpf_reg_state *reg;
@@ -9898,10 +9893,10 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
}
}));
}
- } else if (meta.ref_obj_id) {
- err = release_reference(env, meta.ref_obj_id);
+ } else if (meta.ref_obj.ref_obj_id) {
+ err = release_reference(env, meta.ref_obj.ref_obj_id);
} else if (bpf_register_is_null(®s[meta.release_regno])) {
- /* meta.ref_obj_id can only be 0 if register that is meant to be
+ /* meta.ref_obj.ref_obj_id can only be 0 if register that is meant to be
* released is NULL, which must be > R0.
*/
err = 0;
@@ -10165,15 +10160,14 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
if (type_may_be_null(regs[BPF_REG_0].type))
regs[BPF_REG_0].id = ++env->id_gen;
- if (helper_multiple_ref_obj_use(func_id, meta.map.ptr)) {
- verifier_bug(env, "func %s#%d sets ref_obj_id more than once",
- func_id_name(func_id), func_id);
- return -EFAULT;
- }
-
if (is_ptr_cast_function(func_id)) {
/* For release_reference() */
- regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
+ if (meta.ref_obj.cnt > 1) {
+ verifier_bug(env, "function expects only one referenced object but got %d\n",
+ meta.ref_obj.cnt);
+ return -EFAULT;
+ }
+ regs[BPF_REG_0].ref_obj_id = meta.ref_obj.ref_obj_id;
} else if (is_acquire_function(func_id, meta.map.ptr)) {
int id = acquire_reference(env, insn_idx);
@@ -11616,14 +11610,13 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
if (reg->ref_obj_id) {
- if (is_kfunc_release(meta) && meta->ref_obj_id) {
- verifier_bug(env, "more than one arg with ref_obj_id %s %u %u",
- reg_arg_name(env, argno), reg->ref_obj_id,
- meta->ref_obj_id);
+ if (is_kfunc_release(meta) && meta->ref_obj.cnt) {
+ verbose(env, "more than one arg with ref_obj_id %s %u %u",
+ reg_arg_name(env, argno), reg->ref_obj_id,
+ meta->ref_obj.ref_obj_id);
return -EFAULT;
}
- meta->ref_obj_id = reg->ref_obj_id;
- meta->id = reg->id;
+ update_ref_obj(&meta->ref_obj, reg);
if (is_kfunc_release(meta))
meta->release_regno = regno;
}
@@ -11794,7 +11787,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
ret = process_dynptr_func(env, reg, argno, insn_idx, dynptr_arg_type,
- meta->ref_obj_id ? meta->id : 0, &meta->dynptr);
+ &meta->ref_obj, &meta->dynptr);
if (ret < 0)
return ret;
break;
@@ -12740,8 +12733,14 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
regs[BPF_REG_0].type |= MEM_RDONLY;
/* Ensures we don't access the memory after a release_reference() */
- if (meta.ref_obj_id)
- regs[BPF_REG_0].parent_id = meta.ref_obj_id;
+ if (meta.ref_obj.ref_obj_id) {
+ if (meta.ref_obj.cnt > 1) {
+ verifier_bug(env, "function expects only one referenced object but got %d\n",
+ meta.ref_obj.cnt);
+ return -EFAULT;
+ }
+ regs[BPF_REG_0].parent_id = meta.ref_obj.ref_obj_id;
+ }
if (is_kfunc_rcu_protected(&meta))
regs[BPF_REG_0].type |= MEM_RCU;
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 0bb4337552c8..42d523a21a43 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -2410,27 +2410,3 @@
.errstr_unpriv = "",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
-{
- "calls: several args with ref_obj_id",
- .insns = {
- /* Reserve at least sizeof(struct iphdr) bytes in the ring buffer.
- * With a smaller size, the verifier would reject the call to
- * bpf_tcp_raw_gen_syncookie_ipv4 before we can reach the
- * ref_obj_id error.
- */
- BPF_MOV64_IMM(BPF_REG_2, 20),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
- /* if r0 == 0 goto <exit> */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tcp_raw_gen_syncookie_ipv4),
- BPF_EXIT_INSN(),
- },
- .fixup_map_ringbuf = { 2 },
- .result = REJECT,
- .errstr = "more than one arg with ref_obj_id",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
-},
--
2.52.0
^ permalink raw reply related [flat|nested] 22+ messages in thread
* [PATCH bpf-next v4 08/12] bpf: Unify release handling for helpers and kfuncs
2026-05-06 14:26 [PATCH bpf-next v4 00/12] Refactor verifier object relationship tracking Amery Hung
` (6 preceding siblings ...)
2026-05-06 14:27 ` [PATCH bpf-next v4 07/12] bpf: Unify referenced object tracking in verifier Amery Hung
@ 2026-05-06 14:27 ` Amery Hung
2026-05-06 14:27 ` [PATCH bpf-next v4 09/12] selftests/bpf: Test creating dynptr from dynptr data and slice Amery Hung
` (3 subsequent siblings)
11 siblings, 0 replies; 22+ messages in thread
From: Amery Hung @ 2026-05-06 14:27 UTC (permalink / raw)
To: bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team
Introduce release_reg() to consolidate the release logic shared by both
helpers and kfuncs: dynptr release, kptr_xchg percpu-to-RCU conversion,
regular reference release, and NULL pass-through. NULL pass-through is
only allowed if the prototype indicates the argument may be null.
Determine release_regno from the function prototype/metadata before
argument checking, rather than discovering it dynamically during
argument processing. For helpers, scan the arg_type array in
check_func_proto() via check_proto_release_reg(). For kfuncs, set
release_regno to BPF_REG_1 in bpf_fetch_kfunc_arg_meta() when
KF_RELEASE is set. In the future when we start adding decl_tag to
kfunc arguments, we can just look at the function prototype instead
of a release_regno.
Extract ref_convert_alloc_rcu_protected() and
invalidate_rcu_protected_refs() to make it clearer what the code is
doing. ref_convert_alloc_rcu_protected() releases the reference state
with release_reference_nomark() and converts MEM_ALLOC | MEM_PERCPU
registers carrying the released ref_obj_id to MEM_RCU (clearing
ref_obj_id so they survive the release).
Add KF_RELEASE to bpf_dynptr_file_discard() so its release_regno is set
via fetch_kfunc_meta rather than being assigned manually in the dynptr
argument processing. Set arg_type to ARG_PTR_TO_DYNPTR for
KF_ARG_PTR_TO_DYNPTR so that check_func_arg_reg_off() correctly allows
non-zero stack offsets for dynptr release arguments, the same as for helpers.
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
include/linux/bpf_verifier.h | 6 +-
kernel/bpf/helpers.c | 2 +-
kernel/bpf/verifier.c | 194 +++++++++---------
.../selftests/bpf/prog_tests/cb_refs.c | 2 +-
.../selftests/bpf/progs/cgrp_kfunc_failure.c | 6 +-
.../selftests/bpf/progs/map_kptr_fail.c | 2 +-
.../selftests/bpf/progs/task_kfunc_failure.c | 6 +-
.../bpf/progs/verifier_global_ptr_args.c | 2 +-
.../bpf/progs/verifier_ref_tracking.c | 2 +-
.../selftests/bpf/progs/verifier_sock.c | 6 +-
.../selftests/bpf/progs/verifier_vfs_reject.c | 2 +-
11 files changed, 120 insertions(+), 110 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index a531be98fedf..099ca4b95f0d 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -1399,9 +1399,9 @@ struct bpf_dynptr_desc {
/*
* The last seen rereferenced object; Updated by update_ref_obj() when a register refers to a
- * referenced object. Used when the helper or kfunc is releasing a referenced object, casting
- * a referenced object, returning allocated memory derived from referenced object or creating
- * a dynptr with a referenced object as parent.
+ * referenced object. Used when the helper or kfunc is casting a referenced object, returning
+ * allocated memory derived from referenced object or creating a dynptr with a referenced
+ * object as parent.
*/
struct ref_obj_desc {
u32 id;
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index baa12b24bb64..58908f8cf540 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -4857,7 +4857,7 @@ BTF_ID_FLAGS(func, bpf_stream_print_stack, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_dynptr_from_file)
-BTF_ID_FLAGS(func, bpf_dynptr_file_discard)
+BTF_ID_FLAGS(func, bpf_dynptr_file_discard, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_timer_cancel_async)
BTF_KFUNCS_END(common_btf_ids)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 542912c7983f..e40f09cefa29 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -8010,28 +8010,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
return err;
skip_type_check:
- if (arg_type_is_release(arg_type)) {
- if (!arg_type_is_dynptr(arg_type) && !reg->ref_obj_id && !bpf_register_is_null(reg)) {
- verbose(env, "R%d must be referenced when passed to release function\n",
- regno);
- return -EINVAL;
- }
- if (meta->release_regno) {
- verifier_bug(env, "more than one release argument");
- return -EFAULT;
- }
- meta->release_regno = regno;
+ if (arg_type_is_release(arg_type) && !arg_type_is_dynptr(arg_type) &&
+ !reg->ref_obj_id && !bpf_register_is_null(reg)) {
+ verbose(env, "release helper %s expects referenced PTR_TO_BTF_ID passed to %s\n",
+ func_id_name(meta->func_id), reg_arg_name(env, argno));
+ return -EINVAL;
}
- if (reg->ref_obj_id && base_type(arg_type) != ARG_KPTR_XCHG_DEST) {
- if (meta->release_regno && meta->ref_obj.cnt) {
- verbose(env, "more than one arg with ref_obj_id %s %u %u",
- reg_arg_name(env, argno), reg->ref_obj_id,
- meta->ref_obj.ref_obj_id);
- return -EACCES;
- }
+ if (reg->ref_obj_id)
update_ref_obj(&meta->ref_obj, reg);
- }
switch (base_type(arg_type)) {
case ARG_CONST_MAP_PTR:
@@ -8590,11 +8577,29 @@ static bool check_mem_arg_rw_flag_ok(const struct bpf_func_proto *fn)
return true;
}
-static int check_func_proto(const struct bpf_func_proto *fn)
+static bool check_proto_release_reg(const struct bpf_func_proto *fn, struct bpf_call_arg_meta *meta)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
+ enum bpf_arg_type arg_type = fn->arg_type[i];
+
+ if (arg_type_is_release(arg_type)) {
+ if (meta->release_regno)
+ return false;
+ meta->release_regno = i + 1;
+ }
+ }
+
+ return true;
+}
+
+static int check_func_proto(const struct bpf_func_proto *fn, struct bpf_call_arg_meta *meta)
{
return check_raw_mode_ok(fn) &&
check_arg_pair_ok(fn) &&
check_mem_arg_rw_flag_ok(fn) &&
+ check_proto_release_reg(fn, meta) &&
check_btf_id_ok(fn) ? 0 : -EINVAL;
}
@@ -8752,6 +8757,42 @@ static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
}));
}
+static void invalidate_rcu_protected_refs(struct bpf_verifier_env *env)
+{
+ struct bpf_stack_state *stack;
+ struct bpf_func_state *state;
+ struct bpf_reg_state *reg;
+ u32 clear_mask = (1 << STACK_SPILL) | (1 << STACK_ITER);
+
+ bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, stack, clear_mask, ({
+ if (reg->type & MEM_RCU) {
+ reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL);
+ reg->type |= PTR_UNTRUSTED;
+ }
+ }));
+}
+
+static int ref_convert_alloc_rcu_protected(struct bpf_verifier_env *env, u32 ref_obj_id)
+{
+ struct bpf_func_state *state;
+ struct bpf_reg_state *reg;
+ int err;
+
+ err = release_reference_nomark(env->cur_state, ref_obj_id);
+
+ bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
+ if (reg->ref_obj_id != ref_obj_id)
+ continue;
+ if ((reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU)) {
+ reg->ref_obj_id = 0;
+ reg->type &= ~MEM_ALLOC;
+ reg->type |= MEM_RCU;
+ }
+ }));
+
+ return err;
+}
+
static void clear_caller_saved_regs(struct bpf_verifier_env *env,
struct bpf_reg_state *regs)
{
@@ -9776,6 +9817,23 @@ static const char *non_sleepable_context_description(struct bpf_verifier_env *en
return "non-sleepable prog";
}
+static int release_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ bool convert_rcu, bool release_dynptr)
+{
+ int err = -EINVAL;
+
+ if (release_dynptr)
+ err = unmark_stack_slots_dynptr(env, reg);
+ else if (convert_rcu)
+ err = ref_convert_alloc_rcu_protected(env, reg->ref_obj_id);
+ else if (reg->ref_obj_id)
+ err = release_reference(env, reg->ref_obj_id);
+ else if (bpf_register_is_null(reg))
+ err = 0;
+
+ return err;
+}
+
static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx_p)
{
@@ -9825,7 +9883,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
- err = check_func_proto(fn);
+ err = check_func_proto(fn, &meta);
if (err) {
verifier_bug(env, "incorrect func proto %s#%d", func_id_name(func_id), func_id);
return err;
@@ -9870,37 +9928,11 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
}
if (meta.release_regno) {
- err = -EINVAL;
- if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) {
- err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
- } else if (func_id == BPF_FUNC_kptr_xchg && meta.ref_obj.ref_obj_id) {
- u32 ref_obj_id = meta.ref_obj.ref_obj_id;
- bool in_rcu = in_rcu_cs(env);
- struct bpf_func_state *state;
- struct bpf_reg_state *reg;
-
- err = release_reference_nomark(env->cur_state, ref_obj_id);
- if (!err) {
- bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
- if (reg->ref_obj_id == ref_obj_id) {
- if (in_rcu && (reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU)) {
- reg->ref_obj_id = 0;
- reg->type &= ~MEM_ALLOC;
- reg->type |= MEM_RCU;
- } else {
- mark_reg_invalid(env, reg);
- }
- }
- }));
- }
- } else if (meta.ref_obj.ref_obj_id) {
- err = release_reference(env, meta.ref_obj.ref_obj_id);
- } else if (bpf_register_is_null(&regs[meta.release_regno])) {
- /* meta.ref_obj.ref_obj_id can only be 0 if register that is meant to be
- * released is NULL, which must be > R0.
- */
- err = 0;
- }
+ struct bpf_reg_state *reg = &regs[meta.release_regno];
+ bool convert_rcu = (func_id == BPF_FUNC_kptr_xchg) && in_rcu_cs(env) &&
+ (reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU);
+
+ err = release_reg(env, reg, convert_rcu, !!meta.dynptr.ref_obj_id);
if (err)
return err;
}
@@ -10277,7 +10309,6 @@ static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
return meta->kfunc_flags & KF_RELEASE;
}
-
static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta)
{
return meta->kfunc_flags & KF_DESTRUCTIVE;
@@ -11609,18 +11640,16 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return -EACCES;
}
- if (reg->ref_obj_id) {
- if (is_kfunc_release(meta) && meta->ref_obj.cnt) {
- verbose(env, "more than one arg with ref_obj_id %s %u %u",
- reg_arg_name(env, argno), reg->ref_obj_id,
- meta->ref_obj.ref_obj_id);
- return -EFAULT;
- }
- update_ref_obj(&meta->ref_obj, reg);
- if (is_kfunc_release(meta))
- meta->release_regno = regno;
+ if (regno == meta->release_regno && !is_kfunc_arg_dynptr(meta->btf, &args[i]) &&
+ !reg->ref_obj_id && !bpf_register_is_null(reg)) {
+ verbose(env, "release kfunc %s expects referenced PTR_TO_BTF_ID passed to %s\n",
+ func_name, reg_arg_name(env, argno));
+ return -EINVAL;
}
+ if (reg->ref_obj_id)
+ update_ref_obj(&meta->ref_obj, reg);
+
ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
ref_tname = btf_name_by_offset(btf, ref_t->name_off);
@@ -11683,7 +11712,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
}
fallthrough;
- case KF_ARG_PTR_TO_DYNPTR:
case KF_ARG_PTR_TO_ITER:
case KF_ARG_PTR_TO_LIST_HEAD:
case KF_ARG_PTR_TO_LIST_NODE:
@@ -11700,6 +11728,9 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
case KF_ARG_PTR_TO_IRQ_FLAG:
case KF_ARG_PTR_TO_RES_SPIN_LOCK:
break;
+ case KF_ARG_PTR_TO_DYNPTR:
+ arg_type = ARG_PTR_TO_DYNPTR;
+ break;
case KF_ARG_PTR_TO_CTX:
arg_type = ARG_PTR_TO_CTX;
break;
@@ -11708,7 +11739,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return -EFAULT;
}
- if (is_kfunc_release(meta) && reg->ref_obj_id)
+ if (regno == meta->release_regno)
arg_type |= OBJ_RELEASE;
ret = check_func_arg_reg_off(env, reg, argno, arg_type);
if (ret < 0)
@@ -11773,7 +11804,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
dynptr_arg_type |= DYNPTR_TYPE_FILE;
} else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_file_discard]) {
dynptr_arg_type |= DYNPTR_TYPE_FILE | OBJ_RELEASE;
- meta->release_regno = regno;
} else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] &&
(dynptr_arg_type & MEM_UNINIT)) {
enum bpf_dynptr_type parent_type = meta->dynptr.type;
@@ -12051,12 +12081,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
}
- if (is_kfunc_release(meta) && !meta->release_regno) {
- verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
- func_name);
- return -EINVAL;
- }
-
return 0;
}
@@ -12083,6 +12107,9 @@ int bpf_fetch_kfunc_arg_meta(struct bpf_verifier_env *env,
meta->kfunc_flags = *kfunc.flags;
+ if (is_kfunc_release(meta))
+ meta->release_regno = BPF_REG_1;
+
return 0;
}
@@ -12575,23 +12602,12 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (rcu_lock) {
env->cur_state->active_rcu_locks++;
} else if (rcu_unlock) {
- struct bpf_stack_state *stack;
- struct bpf_func_state *state;
- struct bpf_reg_state *reg;
- u32 clear_mask = (1 << STACK_SPILL) | (1 << STACK_ITER);
-
if (env->cur_state->active_rcu_locks == 0) {
verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name);
return -EINVAL;
}
- if (--env->cur_state->active_rcu_locks == 0) {
- bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, stack, clear_mask, ({
- if (reg->type & MEM_RCU) {
- reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL);
- reg->type |= PTR_UNTRUSTED;
- }
- }));
- }
+ if (--env->cur_state->active_rcu_locks == 0)
+ invalidate_rcu_protected_refs(env);
} else if (preempt_disable) {
env->cur_state->active_preempt_locks++;
} else if (preempt_enable) {
@@ -12622,13 +12638,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
* PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now.
*/
if (meta.release_regno) {
- struct bpf_reg_state *reg = &regs[meta.release_regno];
-
- if (meta.dynptr.ref_obj_id) {
- err = unmark_stack_slots_dynptr(env, reg);
- } else {
- err = release_reference(env, reg->ref_obj_id);
- }
+ err = release_reg(env, &regs[meta.release_regno], false, !!meta.dynptr.ref_obj_id);
if (err)
return err;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/cb_refs.c b/tools/testing/selftests/bpf/prog_tests/cb_refs.c
index 6300b67a3a84..78566b817fd7 100644
--- a/tools/testing/selftests/bpf/prog_tests/cb_refs.c
+++ b/tools/testing/selftests/bpf/prog_tests/cb_refs.c
@@ -11,7 +11,7 @@ struct {
const char *prog_name;
const char *err_msg;
} cb_refs_tests[] = {
- { "underflow_prog", "must point to scalar, or struct with scalar" },
+ { "underflow_prog", "release kfunc bpf_kfunc_call_test_release expects referenced PTR_TO_BTF_ID passed to R1" },
{ "leak_prog", "Possibly NULL pointer passed to helper R2" },
{ "nested_cb", "Unreleased reference id=4 alloc_insn=2" }, /* alloc_insn=2{4,5} */
{ "non_cb_transfer_ref", "Unreleased reference id=4 alloc_insn=1" }, /* alloc_insn=1{1,2} */
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
index a875ba8e5007..d0d65d6d450c 100644
--- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
@@ -154,7 +154,7 @@ int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path)
}
SEC("tp_btf/cgroup_mkdir")
-__failure __msg("must be referenced or trusted")
+__failure __msg("release kfunc bpf_cgroup_release expects referenced PTR_TO_BTF_ID passed to R1")
int BPF_PROG(cgrp_kfunc_rcu_get_release, struct cgroup *cgrp, const char *path)
{
struct cgroup *kptr;
@@ -191,7 +191,7 @@ int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path
}
SEC("tp_btf/cgroup_mkdir")
-__failure __msg("R1 pointer type STRUCT cgroup must point")
+__failure __msg("release kfunc bpf_cgroup_release expects referenced PTR_TO_BTF_ID passed to R1")
int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired = (struct cgroup *)&path;
@@ -237,7 +237,7 @@ int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
}
SEC("tp_btf/cgroup_mkdir")
-__failure __msg("release kernel function bpf_cgroup_release expects")
+__failure __msg("release kfunc bpf_cgroup_release expects referenced PTR_TO_BTF_ID passed to R1")
int BPF_PROG(cgrp_kfunc_release_unacquired, struct cgroup *cgrp, const char *path)
{
/* Cannot release trusted cgroup pointer which was not acquired. */
diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
index 8f36e74fd8f9..f11848dfa78f 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
@@ -252,7 +252,7 @@ int reject_untrusted_store_to_ref(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("R2 must be referenced")
+__failure __msg("release helper bpf_kptr_xchg expects referenced PTR_TO_BTF_ID passed to R2")
int reject_untrusted_xchg(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
index 41047d81ec42..8e947d445f8e 100644
--- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
@@ -178,7 +178,7 @@ int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_f
}
SEC("tp_btf/task_newtask")
-__failure __msg("R1 pointer type STRUCT task_struct must point")
+__failure __msg("release kfunc bpf_task_release expects referenced PTR_TO_BTF_ID passed to R1")
int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired = (struct task_struct *)&clone_flags;
@@ -224,7 +224,7 @@ int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
}
SEC("tp_btf/task_newtask")
-__failure __msg("release kernel function bpf_task_release expects")
+__failure __msg("release kfunc bpf_task_release expects referenced PTR_TO_BTF_ID passed to R1")
int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_flags)
{
/* Cannot release trusted task pointer which was not acquired. */
@@ -313,7 +313,7 @@ int BPF_PROG(task_access_comm4, struct task_struct *task, const char *buf, bool
}
SEC("tp_btf/task_newtask")
-__failure __msg("R1 must be referenced or trusted")
+__failure __msg("release kfunc bpf_task_release expects referenced PTR_TO_BTF_ID passed to R1")
int BPF_PROG(task_kfunc_release_in_map, struct task_struct *task, u64 clone_flags)
{
struct task_struct *local;
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
index e7dae0cf9c17..ea273e152209 100644
--- a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
+++ b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
@@ -153,7 +153,7 @@ __weak int subprog_trusted_destroy(struct task_struct *task __arg_trusted)
SEC("?tp_btf/task_newtask")
__failure __log_level(2)
-__msg("release kernel function bpf_task_release expects refcounted PTR_TO_BTF_ID")
+__msg("release kfunc bpf_task_release expects referenced PTR_TO_BTF_ID passed to R1")
int BPF_PROG(trusted_destroy_fail, struct task_struct *task, u64 clone_flags)
{
return subprog_trusted_destroy(task);
diff --git a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
index 139f70bb3595..199ad18f8eb5 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
@@ -1288,7 +1288,7 @@ l1_%=: r1 = r6; \
SEC("tc")
__description("reference tracking: bpf_sk_release(listen_sk)")
-__failure __msg("R1 must be referenced when passed to release function")
+__failure __msg("release helper bpf_sk_release expects referenced PTR_TO_BTF_ID passed to R1")
__naked void bpf_sk_release_listen_sk(void)
{
asm volatile (
diff --git a/tools/testing/selftests/bpf/progs/verifier_sock.c b/tools/testing/selftests/bpf/progs/verifier_sock.c
index a2132c72d3b8..9f680cf44512 100644
--- a/tools/testing/selftests/bpf/progs/verifier_sock.c
+++ b/tools/testing/selftests/bpf/progs/verifier_sock.c
@@ -603,7 +603,7 @@ l2_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
SEC("tc")
__description("bpf_sk_release(skb->sk)")
-__failure __msg("R1 must be referenced when passed to release function")
+__failure __msg("release helper bpf_sk_release expects referenced PTR_TO_BTF_ID passed to R1")
__naked void bpf_sk_release_skb_sk(void)
{
asm volatile (" \
@@ -620,7 +620,7 @@ l0_%=: r0 = 0; \
SEC("tc")
__description("bpf_sk_release(bpf_sk_fullsock(skb->sk))")
-__failure __msg("R1 must be referenced when passed to release function")
+__failure __msg("release helper bpf_sk_release expects referenced PTR_TO_BTF_ID passed to R1")
__naked void bpf_sk_fullsock_skb_sk(void)
{
asm volatile (" \
@@ -644,7 +644,7 @@ l1_%=: r1 = r0; \
SEC("tc")
__description("bpf_sk_release(bpf_tcp_sock(skb->sk))")
-__failure __msg("R1 must be referenced when passed to release function")
+__failure __msg("release helper bpf_sk_release expects referenced PTR_TO_BTF_ID passed to R1")
__naked void bpf_tcp_sock_skb_sk(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
index 0990de076844..2870738d93f7 100644
--- a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
+++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
@@ -80,7 +80,7 @@ int BPF_PROG(get_task_exe_file_kfunc_unreleased)
}
SEC("lsm.s/file_open")
-__failure __msg("release kernel function bpf_put_file expects")
+__failure __msg("release kfunc bpf_put_file expects referenced PTR_TO_BTF_ID passed to R1")
int BPF_PROG(put_file_kfunc_unacquired, struct file *file)
{
/* Can't release an unacquired pointer. */
--
2.52.0
^ permalink raw reply related [flat|nested] 22+ messages in thread
* [PATCH bpf-next v4 09/12] selftests/bpf: Test creating dynptr from dynptr data and slice
2026-05-06 14:26 [PATCH bpf-next v4 00/12] Refactor verifier object relationship tracking Amery Hung
` (7 preceding siblings ...)
2026-05-06 14:27 ` [PATCH bpf-next v4 08/12] bpf: Unify release handling for helpers and kfuncs Amery Hung
@ 2026-05-06 14:27 ` Amery Hung
2026-05-06 14:27 ` [PATCH bpf-next v4 10/12] selftests/bpf: Test using dynptr after freeing the underlying object Amery Hung
` (2 subsequent siblings)
11 siblings, 0 replies; 22+ messages in thread
From: Amery Hung @ 2026-05-06 14:27 UTC (permalink / raw)
To: bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team
The verifier currently does not allow creating dynptr from dynptr data
or slice. Add a selftest to test this explicitly.
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
.../testing/selftests/bpf/progs/dynptr_fail.c | 42 +++++++++++++++++++
1 file changed, 42 insertions(+)
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index 60324d68b349..31962233bea1 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -705,6 +705,48 @@ int dynptr_from_mem_invalid_api(void *ctx)
return 0;
}
+/* Cannot create dynptr from dynptr data */
+SEC("?raw_tp")
+__failure __msg("Unsupported reg type mem for bpf_dynptr_from_mem data")
+int dynptr_from_dynptr_data(void *ctx)
+{
+ struct bpf_dynptr ptr, ptr2;
+ __u8 *data;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+
+ data = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
+ if (!data)
+ return 0;
+
+ /* this should fail */
+ bpf_dynptr_from_mem(data, sizeof(__u32), 0, &ptr2);
+
+ return 0;
+}
+
+/* Cannot create dynptr from dynptr slice */
+SEC("?tc")
+__failure __msg("Unsupported reg type mem for bpf_dynptr_from_mem data")
+int dynptr_from_dynptr_slice(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr, ptr2;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ /* this should fail */
+ bpf_dynptr_from_mem(hdr, sizeof(*hdr), 0, &ptr2);
+
+ return SK_PASS;
+}
+
SEC("?tc")
__failure __msg("Leaking reference id={{[0-9]+}} alloc_insn={{[0-9]+}}. Release it first.") __log_level(2)
int dynptr_pruning_overwrite(struct __sk_buff *ctx)
--
2.52.0
^ permalink raw reply related [flat|nested] 22+ messages in thread
* [PATCH bpf-next v4 10/12] selftests/bpf: Test using dynptr after freeing the underlying object
2026-05-06 14:26 [PATCH bpf-next v4 00/12] Refactor verifier object relationship tracking Amery Hung
` (8 preceding siblings ...)
2026-05-06 14:27 ` [PATCH bpf-next v4 09/12] selftests/bpf: Test creating dynptr from dynptr data and slice Amery Hung
@ 2026-05-06 14:27 ` Amery Hung
2026-05-06 14:27 ` [PATCH bpf-next v4 11/12] selftests/bpf: Test using slice after invalidating dynptr clone Amery Hung
2026-05-06 14:27 ` [PATCH bpf-next v4 12/12] selftests/bpf: Test using file dynptr after the reference on file is dropped Amery Hung
11 siblings, 0 replies; 22+ messages in thread
From: Amery Hung @ 2026-05-06 14:27 UTC (permalink / raw)
To: bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team
Make sure the verifier invalidates the dynptr and dynptr slice derived
from an skb after the skb is freed.
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
.../selftests/bpf/prog_tests/bpf_qdisc.c | 6 ++
.../progs/bpf_qdisc_fail__invalid_dynptr.c | 68 +++++++++++++++++
...f_qdisc_fail__invalid_dynptr_cross_frame.c | 74 +++++++++++++++++++
.../bpf_qdisc_fail__invalid_dynptr_slice.c | 70 ++++++++++++++++++
4 files changed, 218 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/bpf_qdisc_fail__invalid_dynptr.c
create mode 100644 tools/testing/selftests/bpf/progs/bpf_qdisc_fail__invalid_dynptr_cross_frame.c
create mode 100644 tools/testing/selftests/bpf/progs/bpf_qdisc_fail__invalid_dynptr_slice.c
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c b/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c
index 730357cd0c9a..65277c8fc887 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c
@@ -8,6 +8,9 @@
#include "bpf_qdisc_fifo.skel.h"
#include "bpf_qdisc_fq.skel.h"
#include "bpf_qdisc_fail__incompl_ops.skel.h"
+#include "bpf_qdisc_fail__invalid_dynptr.skel.h"
+#include "bpf_qdisc_fail__invalid_dynptr_slice.skel.h"
+#include "bpf_qdisc_fail__invalid_dynptr_cross_frame.skel.h"
#define LO_IFINDEX 1
@@ -223,6 +226,9 @@ void test_ns_bpf_qdisc(void)
test_qdisc_attach_to_non_root();
if (test__start_subtest("incompl_ops"))
test_incompl_ops();
+ RUN_TESTS(bpf_qdisc_fail__invalid_dynptr);
+ RUN_TESTS(bpf_qdisc_fail__invalid_dynptr_cross_frame);
+ RUN_TESTS(bpf_qdisc_fail__invalid_dynptr_slice);
}
void serial_test_bpf_qdisc_default(void)
diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__invalid_dynptr.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__invalid_dynptr.c
new file mode 100644
index 000000000000..1d96f7987a3f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__invalid_dynptr.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "bpf_experimental.h"
+#include "bpf_qdisc_common.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int proto;
+
+SEC("struct_ops")
+__failure __msg("Expected an initialized dynptr as R1")
+int BPF_PROG(invalid_dynptr, struct sk_buff *skb, struct Qdisc *sch,
+ struct bpf_sk_buff_ptr *to_free)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+
+ bpf_dynptr_from_skb((struct __sk_buff *)skb, 0, &ptr);
+
+ bpf_qdisc_skb_drop(skb, to_free);
+
+ hdr = bpf_dynptr_slice(&ptr, 0, NULL, sizeof(*hdr));
+ if (!hdr)
+ return NET_XMIT_DROP;
+
+ proto = hdr->h_proto;
+
+ return NET_XMIT_DROP;
+}
+
+SEC("struct_ops")
+__auxiliary
+struct sk_buff *BPF_PROG(bpf_qdisc_test_dequeue, struct Qdisc *sch)
+{
+ return NULL;
+}
+
+SEC("struct_ops")
+__auxiliary
+int BPF_PROG(bpf_qdisc_test_init, struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+SEC("struct_ops")
+__auxiliary
+void BPF_PROG(bpf_qdisc_test_reset, struct Qdisc *sch)
+{
+}
+
+SEC("struct_ops")
+__auxiliary
+void BPF_PROG(bpf_qdisc_test_destroy, struct Qdisc *sch)
+{
+}
+
+SEC(".struct_ops")
+struct Qdisc_ops test = {
+ .enqueue = (void *)invalid_dynptr,
+ .dequeue = (void *)bpf_qdisc_test_dequeue,
+ .init = (void *)bpf_qdisc_test_init,
+ .reset = (void *)bpf_qdisc_test_reset,
+ .destroy = (void *)bpf_qdisc_test_destroy,
+ .id = "bpf_qdisc_test",
+};
diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__invalid_dynptr_cross_frame.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__invalid_dynptr_cross_frame.c
new file mode 100644
index 000000000000..2e23b8593af9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__invalid_dynptr_cross_frame.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "bpf_experimental.h"
+#include "bpf_qdisc_common.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int proto;
+
+static __noinline int free_skb(struct sk_buff *skb)
+{
+ bpf_kfree_skb(skb);
+ return 0;
+}
+
+SEC("struct_ops")
+__failure __msg("invalid mem access 'scalar'")
+int BPF_PROG(invalid_dynptr_cross_frame, struct sk_buff *skb, struct Qdisc *sch,
+ struct bpf_sk_buff_ptr *to_free)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+
+ bpf_dynptr_from_skb((struct __sk_buff *)skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice(&ptr, 0, NULL, sizeof(*hdr));
+ if (!hdr)
+ return NET_XMIT_DROP;
+
+ free_skb(skb);
+
+ proto = hdr->h_proto;
+
+ return NET_XMIT_DROP;
+}
+
+SEC("struct_ops")
+__auxiliary
+struct sk_buff *BPF_PROG(bpf_qdisc_test_dequeue, struct Qdisc *sch)
+{
+ return NULL;
+}
+
+SEC("struct_ops")
+__auxiliary
+int BPF_PROG(bpf_qdisc_test_init, struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+SEC("struct_ops")
+__auxiliary
+void BPF_PROG(bpf_qdisc_test_reset, struct Qdisc *sch)
+{
+}
+
+SEC("struct_ops")
+__auxiliary
+void BPF_PROG(bpf_qdisc_test_destroy, struct Qdisc *sch)
+{
+}
+
+SEC(".struct_ops")
+struct Qdisc_ops test = {
+ .enqueue = (void *)invalid_dynptr_cross_frame,
+ .dequeue = (void *)bpf_qdisc_test_dequeue,
+ .init = (void *)bpf_qdisc_test_init,
+ .reset = (void *)bpf_qdisc_test_reset,
+ .destroy = (void *)bpf_qdisc_test_destroy,
+ .id = "bpf_qdisc_test",
+};
diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__invalid_dynptr_slice.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__invalid_dynptr_slice.c
new file mode 100644
index 000000000000..731216c4e45a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__invalid_dynptr_slice.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "bpf_experimental.h"
+#include "bpf_qdisc_common.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int proto;
+
+SEC("struct_ops")
+__failure __msg("invalid mem access 'scalar'")
+int BPF_PROG(invalid_dynptr_slice, struct sk_buff *skb, struct Qdisc *sch,
+ struct bpf_sk_buff_ptr *to_free)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+
+ bpf_dynptr_from_skb((struct __sk_buff *)skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice(&ptr, 0, NULL, sizeof(*hdr));
+ if (!hdr) {
+ bpf_qdisc_skb_drop(skb, to_free);
+ return NET_XMIT_DROP;
+ }
+
+ bpf_qdisc_skb_drop(skb, to_free);
+
+ proto = hdr->h_proto;
+
+ return NET_XMIT_DROP;
+}
+
+SEC("struct_ops")
+__auxiliary
+struct sk_buff *BPF_PROG(bpf_qdisc_test_dequeue, struct Qdisc *sch)
+{
+ return NULL;
+}
+
+SEC("struct_ops")
+__auxiliary
+int BPF_PROG(bpf_qdisc_test_init, struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+SEC("struct_ops")
+__auxiliary
+void BPF_PROG(bpf_qdisc_test_reset, struct Qdisc *sch)
+{
+}
+
+SEC("struct_ops")
+__auxiliary
+void BPF_PROG(bpf_qdisc_test_destroy, struct Qdisc *sch)
+{
+}
+
+SEC(".struct_ops")
+struct Qdisc_ops test = {
+ .enqueue = (void *)invalid_dynptr_slice,
+ .dequeue = (void *)bpf_qdisc_test_dequeue,
+ .init = (void *)bpf_qdisc_test_init,
+ .reset = (void *)bpf_qdisc_test_reset,
+ .destroy = (void *)bpf_qdisc_test_destroy,
+ .id = "bpf_qdisc_test",
+};
--
2.52.0
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH bpf-next v4 11/12] selftests/bpf: Test using slice after invalidating dynptr clone
2026-05-06 14:26 [PATCH bpf-next v4 00/12] Refactor verifier object relationship tracking Amery Hung
` (9 preceding siblings ...)
2026-05-06 14:27 ` [PATCH bpf-next v4 10/12] selftests/bpf: Test using dynptr after freeing the underlying object Amery Hung
@ 2026-05-06 14:27 ` Amery Hung
2026-05-06 14:27 ` [PATCH bpf-next v4 12/12] selftests/bpf: Test using file dynptr after the reference on file is dropped Amery Hung
11 siblings, 0 replies; 22+ messages in thread
From: Amery Hung @ 2026-05-06 14:27 UTC (permalink / raw)
To: bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team
The parent object of a cloned dynptr is the skb, not the original dynptr.
Invalidating the original dynptr should not prevent the program from
using the slice derived from the cloned dynptr.
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
.../selftests/bpf/prog_tests/bpf_qdisc.c | 2 +
..._qdisc_dynptr_use_after_invalidate_clone.c | 74 +++++++++++++++++++
2 files changed, 76 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/bpf_qdisc_dynptr_use_after_invalidate_clone.c
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c b/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c
index 65277c8fc887..77f1c0550c9b 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c
@@ -11,6 +11,7 @@
#include "bpf_qdisc_fail__invalid_dynptr.skel.h"
#include "bpf_qdisc_fail__invalid_dynptr_slice.skel.h"
#include "bpf_qdisc_fail__invalid_dynptr_cross_frame.skel.h"
+#include "bpf_qdisc_dynptr_use_after_invalidate_clone.skel.h"
#define LO_IFINDEX 1
@@ -229,6 +230,7 @@ void test_ns_bpf_qdisc(void)
RUN_TESTS(bpf_qdisc_fail__invalid_dynptr);
RUN_TESTS(bpf_qdisc_fail__invalid_dynptr_cross_frame);
RUN_TESTS(bpf_qdisc_fail__invalid_dynptr_slice);
+ RUN_TESTS(bpf_qdisc_dynptr_use_after_invalidate_clone);
}
void serial_test_bpf_qdisc_default(void)
diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_dynptr_use_after_invalidate_clone.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_dynptr_use_after_invalidate_clone.c
new file mode 100644
index 000000000000..ac626cfa2a98
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_dynptr_use_after_invalidate_clone.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "bpf_experimental.h"
+#include "bpf_qdisc_common.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int proto;
+
+SEC("struct_ops")
+__success
+int BPF_PROG(dynptr_use_after_invalidate_clone, struct sk_buff *skb, struct Qdisc *sch,
+ struct bpf_sk_buff_ptr *to_free)
+{
+ struct bpf_dynptr ptr, ptr_clone;
+ struct ethhdr *hdr;
+
+ bpf_dynptr_from_skb((struct __sk_buff *)skb, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &ptr_clone);
+
+ hdr = bpf_dynptr_slice(&ptr_clone, 0, NULL, sizeof(*hdr));
+ if (!hdr) {
+ bpf_qdisc_skb_drop(skb, to_free);
+ return NET_XMIT_DROP;
+ }
+
+ *(int *)&ptr = 0;
+
+ proto = hdr->h_proto;
+
+ bpf_qdisc_skb_drop(skb, to_free);
+
+ return NET_XMIT_DROP;
+}
+
+SEC("struct_ops")
+__auxiliary
+struct sk_buff *BPF_PROG(bpf_qdisc_test_dequeue, struct Qdisc *sch)
+{
+ return NULL;
+}
+
+SEC("struct_ops")
+__auxiliary
+int BPF_PROG(bpf_qdisc_test_init, struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+SEC("struct_ops")
+__auxiliary
+void BPF_PROG(bpf_qdisc_test_reset, struct Qdisc *sch)
+{
+}
+
+SEC("struct_ops")
+__auxiliary
+void BPF_PROG(bpf_qdisc_test_destroy, struct Qdisc *sch)
+{
+}
+
+SEC(".struct_ops")
+struct Qdisc_ops test = {
+ .enqueue = (void *)dynptr_use_after_invalidate_clone,
+ .dequeue = (void *)bpf_qdisc_test_dequeue,
+ .init = (void *)bpf_qdisc_test_init,
+ .reset = (void *)bpf_qdisc_test_reset,
+ .destroy = (void *)bpf_qdisc_test_destroy,
+ .id = "bpf_qdisc_test",
+};
--
2.52.0
^ permalink raw reply related [flat|nested] 22+ messages in thread* [PATCH bpf-next v4 12/12] selftests/bpf: Test using file dynptr after the reference on file is dropped
2026-05-06 14:26 [PATCH bpf-next v4 00/12] Refactor verifier object relationship tracking Amery Hung
` (10 preceding siblings ...)
2026-05-06 14:27 ` [PATCH bpf-next v4 11/12] selftests/bpf: Test using slice after invalidating dynptr clone Amery Hung
@ 2026-05-06 14:27 ` Amery Hung
11 siblings, 0 replies; 22+ messages in thread
From: Amery Hung @ 2026-05-06 14:27 UTC (permalink / raw)
To: bpf
Cc: netdev, alexei.starovoitov, andrii, daniel, eddyz87, memxor,
martin.lau, mykyta.yatsenko5, ameryhung, kernel-team
File dynptr and slice should be invalidated when the parent file's
reference is dropped in the program. Without the verifier tracking the
dynptr's parent referenced object, the dynptr could continue to be
incorrectly used even after the underlying file is torn down or gone.
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
.../selftests/bpf/progs/file_reader_fail.c | 60 +++++++++++++++++++
1 file changed, 60 insertions(+)
diff --git a/tools/testing/selftests/bpf/progs/file_reader_fail.c b/tools/testing/selftests/bpf/progs/file_reader_fail.c
index 0739620dea8a..d5fae5e4cf9a 100644
--- a/tools/testing/selftests/bpf/progs/file_reader_fail.c
+++ b/tools/testing/selftests/bpf/progs/file_reader_fail.c
@@ -50,3 +50,63 @@ int xdp_no_dynptr_type(struct xdp_md *xdp)
bpf_dynptr_file_discard(&dynptr);
return 0;
}
+
+SEC("lsm/file_open")
+__failure
+__msg("Leaking reference id={{[0-9]+}} alloc_insn={{[0-9]+}}. Release it first.")
+int use_file_dynptr_after_put_file(void *ctx)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct file *file = bpf_get_task_exe_file(task);
+ struct bpf_dynptr dynptr;
+ char buf[64];
+
+ if (!file)
+ return 0;
+
+ if (bpf_dynptr_from_file(file, 0, &dynptr))
+ goto out;
+
+ /* this should fail - file dynptr should be discarded first to prevent resource leak */
+ bpf_put_file(file);
+
+ bpf_dynptr_read(buf, sizeof(buf), &dynptr, 0, 0);
+ return 0;
+
+out:
+ bpf_dynptr_file_discard(&dynptr);
+ bpf_put_file(file);
+ return 0;
+}
+
+SEC("lsm/file_open")
+__failure
+__msg("Leaking reference id={{[0-9]+}} alloc_insn={{[0-9]+}}. Release it first.")
+int use_file_dynptr_slice_after_put_file(void *ctx)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct file *file = bpf_get_task_exe_file(task);
+ struct bpf_dynptr dynptr;
+ char *data;
+
+ if (!file)
+ return 0;
+
+ if (bpf_dynptr_from_file(file, 0, &dynptr))
+ goto out;
+
+ data = bpf_dynptr_data(&dynptr, 0, 1);
+ if (!data)
+ goto out;
+
+ /* this should fail - file dynptr should be discarded first to prevent resource leak */
+ bpf_put_file(file);
+
+ *data = 'x';
+ return 0;
+
+out:
+ bpf_dynptr_file_discard(&dynptr);
+ bpf_put_file(file);
+ return 0;
+}
--
2.52.0
^ permalink raw reply related [flat|nested] 22+ messages in thread