From: Yonghong Song <yonghong.song@linux.dev>
To: bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
Andrii Nakryiko <andrii@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
"Jose E . Marchesi" <jose.marchesi@oracle.com>,
kernel-team@fb.com, Martin KaFai Lau <martin.lau@kernel.org>
Subject: [PATCH bpf-next 04/18] bpf: Extend liveness analysis to track stack argument slots
Date: Fri, 24 Apr 2026 10:14:54 -0700 [thread overview]
Message-ID: <20260424171454.2035580-1-yonghong.song@linux.dev> (raw)
In-Reply-To: <20260424171433.2034470-1-yonghong.song@linux.dev>
BPF_REG_PARAMS (R11) is at index MAX_BPF_REG, which is beyond the
register tracking arrays in const_fold.c and liveness.c. Handle it
explicitly to avoid out-of-bounds accesses.
In liveness.c, extend the arg tracking dataflow to cover stack arg
slots. Otherwise, pointers passed through stack args are invisible
to liveness, causing the pointed-to stack slots to be incorrectly
poisoned.
Add a parallel tracking array (at_stack_arg_out/at_stack_arg_entry)
in arg_track_xfer() to record FP-derived values stored to outgoing
stack arg slots and to restore them on incoming reads. Extend
record_call_access() to check stack arg slots for FP-derived
pointers at kfunc call sites, reusing the record_arg_access() helper
extracted in the previous patch. Pass stack arg state from caller to
callee in analyze_subprog() so that callees can track pointers
received through stack args.
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
---
include/linux/bpf_verifier.h | 2 +
kernel/bpf/const_fold.c | 13 +++-
kernel/bpf/liveness.c | 133 ++++++++++++++++++++++++++++++++---
3 files changed, 135 insertions(+), 13 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 735f33ad3db7..de723d27da94 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -905,6 +905,8 @@ struct bpf_verifier_env {
struct bpf_jmp_history_entry *cur_hist_ent;
/* Per-callsite copy of parent's converged at_stack_in for cross-frame fills. */
struct arg_track **callsite_at_stack;
+ /* Per-callsite stack arg state for cross-frame stack arg tracking. */
+ struct arg_track **callsite_at_stack_arg;
u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
diff --git a/kernel/bpf/const_fold.c b/kernel/bpf/const_fold.c
index db73c4740b1e..b65285d61efe 100644
--- a/kernel/bpf/const_fold.c
+++ b/kernel/bpf/const_fold.c
@@ -51,13 +51,22 @@ static void const_reg_xfer(struct bpf_verifier_env *env, struct const_arg_info *
struct bpf_insn *insn, struct bpf_insn *insns, int idx)
{
struct const_arg_info unknown = { .state = CONST_ARG_UNKNOWN, .val = 0 };
- struct const_arg_info *dst = &ci_out[insn->dst_reg];
- struct const_arg_info *src = &ci_out[insn->src_reg];
+ struct const_arg_info *dst, *src;
u8 class = BPF_CLASS(insn->code);
u8 mode = BPF_MODE(insn->code);
u8 opcode = BPF_OP(insn->code) | BPF_SRC(insn->code);
int r;
+ /* Stack arguments use BPF_REG_PARAMS which is outside the tracked register set. */
+ if (insn->dst_reg == BPF_REG_PARAMS)
+ return;
+ if (insn->src_reg == BPF_REG_PARAMS) {
+ ci_out[insn->dst_reg] = unknown;
+ return;
+ }
+
+ dst = &ci_out[insn->dst_reg];
+ src = &ci_out[insn->src_reg];
switch (class) {
case BPF_ALU:
case BPF_ALU64:
diff --git a/kernel/bpf/liveness.c b/kernel/bpf/liveness.c
index 1b45621d652c..b60b5fdae9b9 100644
--- a/kernel/bpf/liveness.c
+++ b/kernel/bpf/liveness.c
@@ -610,6 +610,18 @@ enum arg_track_state {
/* Track callee stack slots fp-8 through fp-512 (64 slots of 8 bytes each) */
#define MAX_ARG_SPILL_SLOTS 64
+/* Stack arg slots: outgoing at -(i+1)*8, incoming at +(i+1)*8 */
+#define MAX_STACK_ARG_SLOTS (MAX_BPF_FUNC_ARGS - MAX_BPF_FUNC_REG_ARGS)
+
+static int stack_arg_off_to_slot(s16 off)
+{
+ int aoff = off < 0 ? -off : off;
+
+ if (aoff / 8 > MAX_STACK_ARG_SLOTS)
+ return -1;
+ return aoff / 8 - 1;
+}
+
static bool arg_is_visited(const struct arg_track *at)
{
return at->frame != ARG_UNVISITED;
@@ -1062,17 +1074,37 @@ static bool can_be_local_fp(int depth, int regno, struct arg_track *at)
static void arg_track_xfer(struct bpf_verifier_env *env, struct bpf_insn *insn,
int insn_idx,
struct arg_track *at_out, struct arg_track *at_stack_out,
+ struct arg_track *at_stack_arg_out,
+ const struct arg_track *at_stack_arg_entry,
struct func_instance *instance,
u32 *callsites)
{
int depth = instance->depth;
u8 class = BPF_CLASS(insn->code);
u8 code = BPF_OP(insn->code);
- struct arg_track *dst = &at_out[insn->dst_reg];
- struct arg_track *src = &at_out[insn->src_reg];
+ struct arg_track *dst, *src;
struct arg_track none = { .frame = ARG_NONE };
- int r;
+ int r, slot;
+
+ /* Handle stack arg stores and loads. */
+ if (insn->dst_reg == BPF_REG_PARAMS) {
+ slot = stack_arg_off_to_slot(insn->off);
+ if (slot >= 0) {
+ if (class == BPF_STX)
+ at_stack_arg_out[slot] = at_out[insn->src_reg];
+ else
+ at_stack_arg_out[slot] = none;
+ }
+ return;
+ }
+ if (insn->src_reg == BPF_REG_PARAMS) {
+ slot = stack_arg_off_to_slot(insn->off);
+ at_out[insn->dst_reg] = (slot >= 0) ? at_stack_arg_entry[slot] : none;
+ return;
+ }
+ dst = &at_out[insn->dst_reg];
+ src = &at_out[insn->src_reg];
if (class == BPF_ALU64 && BPF_SRC(insn->code) == BPF_K) {
if (code == BPF_MOV) {
*dst = none;
@@ -1385,6 +1417,7 @@ static int record_arg_access(struct bpf_verifier_env *env,
static int record_call_access(struct bpf_verifier_env *env,
struct func_instance *instance,
struct arg_track *at,
+ struct arg_track *at_stack_arg,
int insn_idx)
{
struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
@@ -1402,6 +1435,15 @@ static int record_call_access(struct bpf_verifier_env *env,
if (err)
return err > 0 ? 0 : err;
}
+
+ if (num_params <= MAX_BPF_FUNC_REG_ARGS)
+ return 0;
+ for (r = 0; r < MAX_STACK_ARG_SLOTS && r < num_params - MAX_BPF_FUNC_REG_ARGS; r++) {
+ err = record_arg_access(env, instance, insn, &at_stack_arg[r],
+ r + MAX_BPF_FUNC_REG_ARGS, insn_idx);
+ if (err)
+ return err > 0 ? 0 : err;
+ }
return 0;
}
@@ -1545,6 +1587,7 @@ static void print_subprog_arg_access(struct bpf_verifier_env *env,
static int compute_subprog_args(struct bpf_verifier_env *env,
struct subprog_at_info *info,
struct arg_track *callee_entry,
+ struct arg_track *callee_stack_arg_entry,
struct func_instance *instance,
u32 *callsites)
{
@@ -1560,6 +1603,9 @@ static int compute_subprog_args(struct bpf_verifier_env *env,
struct arg_track at_out[MAX_BPF_REG];
struct arg_track (*at_stack_in)[MAX_ARG_SPILL_SLOTS] = NULL;
struct arg_track *at_stack_out = NULL;
+ struct arg_track (*at_stack_arg_in)[MAX_STACK_ARG_SLOTS] = NULL;
+ struct arg_track at_stack_arg_out[MAX_STACK_ARG_SLOTS];
+ struct arg_track at_stack_arg_entry[MAX_STACK_ARG_SLOTS];
struct arg_track unvisited = { .frame = ARG_UNVISITED };
struct arg_track none = { .frame = ARG_NONE };
bool changed;
@@ -1569,6 +1615,10 @@ static int compute_subprog_args(struct bpf_verifier_env *env,
if (!at_in)
goto err_free;
+ at_stack_arg_in = kvmalloc_objs(*at_stack_arg_in, len, GFP_KERNEL_ACCOUNT);
+ if (!at_stack_arg_in)
+ goto err_free;
+
at_stack_in = kvmalloc_objs(*at_stack_in, len, GFP_KERNEL_ACCOUNT);
if (!at_stack_in)
goto err_free;
@@ -1582,6 +1632,8 @@ static int compute_subprog_args(struct bpf_verifier_env *env,
at_in[i][r] = unvisited;
for (r = 0; r < MAX_ARG_SPILL_SLOTS; r++)
at_stack_in[i][r] = unvisited;
+ for (r = 0; r < MAX_STACK_ARG_SLOTS; r++)
+ at_stack_arg_in[i][r] = unvisited;
}
for (r = 0; r < MAX_BPF_REG; r++)
@@ -1600,6 +1652,12 @@ static int compute_subprog_args(struct bpf_verifier_env *env,
for (r = 0; r < MAX_ARG_SPILL_SLOTS; r++)
at_stack_in[0][r] = none;
+ /* Entry: incoming stack args from caller, or ARG_NONE for main */
+ for (r = 0; r < MAX_STACK_ARG_SLOTS; r++) {
+ at_stack_arg_entry[r] = callee_stack_arg_entry ? callee_stack_arg_entry[r] : none;
+ at_stack_arg_in[0][r] = none;
+ }
+
if (env->log.level & BPF_LOG_LEVEL2)
verbose(env, "subprog#%d: analyzing (depth %d)...\n", subprog, depth);
@@ -1617,8 +1675,10 @@ static int compute_subprog_args(struct bpf_verifier_env *env,
memcpy(at_out, at_in[i], sizeof(at_out));
memcpy(at_stack_out, at_stack_in[i], MAX_ARG_SPILL_SLOTS * sizeof(*at_stack_out));
+ memcpy(at_stack_arg_out, at_stack_arg_in[i], sizeof(at_stack_arg_out));
- arg_track_xfer(env, insn, idx, at_out, at_stack_out, instance, callsites);
+ arg_track_xfer(env, insn, idx, at_out, at_stack_out,
+ at_stack_arg_out, at_stack_arg_entry, instance, callsites);
arg_track_log(env, insn, idx, at_in[i], at_stack_in[i], at_out, at_stack_out);
/* Propagate to successors within this subprogram */
@@ -1639,6 +1699,18 @@ static int compute_subprog_args(struct bpf_verifier_env *env,
for (r = 0; r < MAX_ARG_SPILL_SLOTS; r++)
changed |= arg_track_join(env, idx, target, -r - 1,
&at_stack_in[ti][r], at_stack_out[r]);
+
+ /*
+ * Stack arg slots use indices after spill slots:
+ * -(r + MAX_ARG_SPILL_SLOTS + 1) so that index * 8 in verbose logging
+ * yields unique FP-relative offsets that don't collide with spill slot
+ * offsets -8..-512.
+ */
+ for (r = 0; r < MAX_STACK_ARG_SLOTS; r++)
+ changed |= arg_track_join(env, idx, target,
+ -(r + MAX_ARG_SPILL_SLOTS + 1),
+ &at_stack_arg_in[ti][r],
+ at_stack_arg_out[r]);
}
}
if (changed)
@@ -1655,7 +1727,7 @@ static int compute_subprog_args(struct bpf_verifier_env *env,
goto err_free;
if (insn->code == (BPF_JMP | BPF_CALL)) {
- err = record_call_access(env, instance, at_in[i], idx);
+ err = record_call_access(env, instance, at_in[i], at_stack_arg_in[i], idx);
if (err)
goto err_free;
}
@@ -1671,6 +1743,17 @@ static int compute_subprog_args(struct bpf_verifier_env *env,
}
memcpy(env->callsite_at_stack[idx],
at_stack_in[i], sizeof(struct arg_track) * MAX_ARG_SPILL_SLOTS);
+
+ kvfree(env->callsite_at_stack_arg[idx]);
+ env->callsite_at_stack_arg[idx] =
+ kvmalloc_objs(*env->callsite_at_stack_arg[idx],
+ MAX_STACK_ARG_SLOTS, GFP_KERNEL_ACCOUNT);
+ if (!env->callsite_at_stack_arg[idx]) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ memcpy(env->callsite_at_stack_arg[idx],
+ at_stack_arg_in[i], sizeof(struct arg_track) * MAX_STACK_ARG_SLOTS);
}
}
@@ -1683,6 +1766,7 @@ static int compute_subprog_args(struct bpf_verifier_env *env,
err_free:
kvfree(at_stack_out);
kvfree(at_stack_in);
+ kvfree(at_stack_arg_in);
kvfree(at_in);
return err;
}
@@ -1696,6 +1780,16 @@ static bool has_fp_args(struct arg_track *args)
return false;
}
+static bool has_fp_stack_args(struct arg_track *stack_args)
+{
+ if (!stack_args)
+ return false;
+ for (int r = 0; r < MAX_STACK_ARG_SLOTS; r++)
+ if (arg_is_fp(&stack_args[r]))
+ return true;
+ return false;
+}
+
/*
* Merge a freshly analyzed instance into the original.
* may_read: union (any pass might read the slot).
@@ -1768,6 +1862,7 @@ static void free_instance(struct func_instance *instance)
*/
static int analyze_subprog(struct bpf_verifier_env *env,
struct arg_track *entry_args,
+ struct arg_track *entry_stack_args,
struct subprog_at_info *info,
struct func_instance *instance,
u32 *callsites)
@@ -1809,7 +1904,8 @@ static int analyze_subprog(struct bpf_verifier_env *env,
kvfree(info[subprog].at_in);
info[subprog].at_in = NULL;
- err = compute_subprog_args(env, &info[subprog], entry_args, instance, callsites);
+ err = compute_subprog_args(env, &info[subprog], entry_args, entry_stack_args, instance,
+ callsites);
if (err)
goto out_free;
@@ -1817,6 +1913,7 @@ static int analyze_subprog(struct bpf_verifier_env *env,
for (int p = po_start; p < po_end; p++) {
int idx = env->cfg.insn_postorder[p];
struct arg_track callee_args[BPF_REG_5 + 1];
+ struct arg_track *callee_stack_args = NULL;
struct arg_track none = { .frame = ARG_NONE };
struct bpf_insn *insn = &insns[idx];
struct func_instance *callee_instance;
@@ -1834,6 +1931,7 @@ static int analyze_subprog(struct bpf_verifier_env *env,
/* Build entry args: R1-R5 from at_in at call site */
for (int r = BPF_REG_1; r <= BPF_REG_5; r++)
callee_args[r] = info[subprog].at_in[j][r];
+ callee_stack_args = env->callsite_at_stack_arg[idx];
} else if (bpf_calls_callback(env, idx)) {
callee = find_callback_subprog(env, insn, idx, &caller_reg, &cb_callee_reg);
if (callee == -2) {
@@ -1860,7 +1958,7 @@ static int analyze_subprog(struct bpf_verifier_env *env,
continue;
}
- if (!has_fp_args(callee_args))
+ if (!has_fp_args(callee_args) && !has_fp_stack_args(callee_stack_args))
continue;
if (depth == MAX_CALL_FRAMES - 1) {
@@ -1874,7 +1972,8 @@ static int analyze_subprog(struct bpf_verifier_env *env,
goto out_free;
}
callsites[depth] = idx;
- err = analyze_subprog(env, callee_args, info, callee_instance, callsites);
+ err = analyze_subprog(env, callee_args, callee_stack_args, info, callee_instance,
+ callsites);
if (err)
goto out_free;
@@ -1926,13 +2025,21 @@ int bpf_compute_subprog_arg_access(struct bpf_verifier_env *env)
kvfree(info);
return -ENOMEM;
}
+ env->callsite_at_stack_arg = kvzalloc_objs(*env->callsite_at_stack_arg, insn_cnt,
+ GFP_KERNEL_ACCOUNT);
+ if (!env->callsite_at_stack_arg) {
+ kvfree(env->callsite_at_stack);
+ env->callsite_at_stack = NULL;
+ kvfree(info);
+ return -ENOMEM;
+ }
instance = call_instance(env, NULL, 0, 0);
if (IS_ERR(instance)) {
err = PTR_ERR(instance);
goto out;
}
- err = analyze_subprog(env, NULL, info, instance, callsites);
+ err = analyze_subprog(env, NULL, NULL, info, instance, callsites);
if (err)
goto out;
@@ -1958,7 +2065,7 @@ int bpf_compute_subprog_arg_access(struct bpf_verifier_env *env)
err = PTR_ERR(instance);
goto out;
}
- err = analyze_subprog(env, NULL, info, instance, callsites);
+ err = analyze_subprog(env, NULL, NULL, info, instance, callsites);
if (err)
goto out;
}
@@ -1967,10 +2074,14 @@ int bpf_compute_subprog_arg_access(struct bpf_verifier_env *env)
err = print_instances(env);
out:
- for (k = 0; k < insn_cnt; k++)
+ for (k = 0; k < insn_cnt; k++) {
kvfree(env->callsite_at_stack[k]);
+ kvfree(env->callsite_at_stack_arg[k]);
+ }
kvfree(env->callsite_at_stack);
env->callsite_at_stack = NULL;
+ kvfree(env->callsite_at_stack_arg);
+ env->callsite_at_stack_arg = NULL;
for (k = 0; k < env->subprog_cnt; k++)
kvfree(info[k].at_in);
kvfree(info);
--
2.52.0
next prev parent reply other threads:[~2026-04-24 17:14 UTC|newest]
Thread overview: 46+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-24 17:14 [PATCH bpf-next 00/18] bpf: Support stack arguments for BPF functions and kfuncs Yonghong Song
2026-04-24 17:14 ` [PATCH bpf-next 01/18] bpf: Support stack arguments for bpf functions Yonghong Song
2026-04-24 18:13 ` bot+bpf-ci
2026-04-25 5:09 ` Yonghong Song
2026-04-27 20:40 ` Yonghong Song
2026-04-28 14:29 ` Eduard Zingerman
2026-04-28 16:47 ` Yonghong Song
2026-04-28 23:50 ` Yonghong Song
2026-04-29 0:28 ` Eduard Zingerman
2026-04-24 17:14 ` [PATCH bpf-next 02/18] bpf: Add precision marking and backtracking for stack argument slots Yonghong Song
2026-04-24 18:00 ` bot+bpf-ci
2026-04-25 5:10 ` Yonghong Song
2026-04-28 16:46 ` Eduard Zingerman
2026-04-28 20:54 ` Yonghong Song
2026-04-24 17:14 ` [PATCH bpf-next 03/18] bpf: Refactor record_call_access() to extract per-arg logic Yonghong Song
2026-04-29 0:51 ` Eduard Zingerman
2026-04-24 17:14 ` Yonghong Song [this message]
2026-04-24 18:00 ` [PATCH bpf-next 04/18] bpf: Extend liveness analysis to track stack argument slots bot+bpf-ci
2026-04-25 5:11 ` Yonghong Song
2026-04-24 17:14 ` [PATCH bpf-next 05/18] bpf: Reject stack arguments in non-JITed programs Yonghong Song
2026-04-24 18:00 ` bot+bpf-ci
2026-04-24 17:15 ` [PATCH bpf-next 06/18] bpf: Prepare architecture JIT support for stack arguments Yonghong Song
2026-04-24 17:48 ` bot+bpf-ci
2026-04-25 5:17 ` Yonghong Song
2026-04-24 17:15 ` [PATCH bpf-next 07/18] bpf: Enable r11 based insns Yonghong Song
2026-04-24 17:15 ` [PATCH bpf-next 08/18] bpf: Support stack arguments for kfunc calls Yonghong Song
2026-04-24 18:00 ` bot+bpf-ci
2026-04-25 5:19 ` Yonghong Song
2026-04-24 17:15 ` [PATCH bpf-next 09/18] bpf: Reject stack arguments if tail call reachable Yonghong Song
2026-04-24 18:00 ` bot+bpf-ci
2026-04-24 17:15 ` [PATCH bpf-next 10/18] bpf,x86: Implement JIT support for stack arguments Yonghong Song
2026-04-24 18:00 ` bot+bpf-ci
2026-04-25 5:29 ` Yonghong Song
2026-04-24 17:16 ` [PATCH bpf-next 11/18] selftests/bpf: Add tests for BPF function " Yonghong Song
2026-04-24 17:16 ` [PATCH bpf-next 12/18] selftests/bpf: Add tests for stack argument validation Yonghong Song
2026-04-24 17:17 ` [PATCH bpf-next 13/18] selftests/bpf: Add verifier " Yonghong Song
2026-04-24 17:48 ` bot+bpf-ci
2026-04-25 5:33 ` Yonghong Song
2026-04-24 17:17 ` [PATCH bpf-next 14/18] selftests/bpf: Add BTF fixup for __naked subprog parameter names Yonghong Song
2026-04-24 17:17 ` [PATCH bpf-next 15/18] selftests/bpf: Add precision backtracking test for stack arguments Yonghong Song
2026-04-24 17:17 ` [PATCH bpf-next 16/18] bpf, arm64: Map BPF_REG_0 to x8 instead of x7 Yonghong Song
2026-04-24 17:17 ` [PATCH bpf-next 17/18] bpf, arm64: Add JIT support for stack arguments Yonghong Song
2026-04-24 18:00 ` bot+bpf-ci
2026-04-27 9:06 ` Puranjay Mohan
2026-04-27 20:42 ` Yonghong Song
2026-04-24 17:17 ` [PATCH bpf-next 18/18] selftests/bpf: Enable stack argument tests for arm64 Yonghong Song
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260424171454.2035580-1-yonghong.song@linux.dev \
--to=yonghong.song@linux.dev \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=jose.marchesi@oracle.com \
--cc=kernel-team@fb.com \
--cc=martin.lau@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox