From: Yonghong Song <yonghong.song@linux.dev>
To: bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
Andrii Nakryiko <andrii@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
"Jose E . Marchesi" <jose.marchesi@oracle.com>,
kernel-team@fb.com, Martin KaFai Lau <martin.lau@kernel.org>
Subject: [PATCH bpf-next 02/18] bpf: Add precision marking and backtracking for stack argument slots
Date: Fri, 24 Apr 2026 10:14:43 -0700 [thread overview]
Message-ID: <20260424171443.2034958-1-yonghong.song@linux.dev> (raw)
In-Reply-To: <20260424171433.2034470-1-yonghong.song@linux.dev>
Extend the precision marking and backtracking infrastructure to
support stack argument slots (r11-based accesses). Without this,
precision demands for scalar values passed through stack arguments
are silently dropped, which could allow the verifier to incorrectly
prune states with different constant values in stack arg slots.
INSN_F_STACK_ARG_ACCESS is encoded as INSN_F_STACK_ACCESS |
INSN_F_DST_REG_STACK (BIT(9) | BIT(10)). This is safe because
INSN_F_STACK_ACCESS is only used for ST/STX/LDX insns while
INSN_F_DST_REG_STACK is only used for JMP insns — they never appear
on the same instruction. This keeps the total within the 12-bit
jmp_history flags budget.
Three components are added:
1. Jump history recording for stack arg accesses:
- check_stack_arg_write() records INSN_F_STACK_ARG_ACCESS for
outgoing stores.
- check_stack_arg_read() records INSN_F_STACK_ARG_ACCESS for
incoming loads.
2. backtrack_insn() handling:
- BPF_LDX: when backtracking through an incoming stack arg load,
transfer precision demand from the destination register to the
stack arg slot mask.
- BPF_STX/BPF_ST: when backtracking through an outgoing stack arg
store, transfer precision demand from the stack arg slot to the
source register.
- Call boundary: when backtracking across a call instruction from a
  callee frame into its caller, propagate the callee's incoming stack
  arg precision bits to the caller's outgoing stack arg slots. The
  slot index maps directly (slot i in the callee corresponds to slot
  i in the caller) since the caller's stack_arg_regs only contains
  outgoing slots.
3. bpf_mark_chain_precision() state walking:
- When iterating parent states, mark stack_arg_regs[spi].precise
for slots that have pending precision demand.
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
---
include/linux/bpf_verifier.h | 13 ++++++++
kernel/bpf/backtrack.c | 61 ++++++++++++++++++++++++++++++++++--
kernel/bpf/verifier.c | 30 +++++++++++++++---
3 files changed, 98 insertions(+), 6 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 2cc349d7fc17..735f33ad3db7 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -393,6 +393,13 @@ enum {
INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
INSN_F_STACK_ACCESS = BIT(9),
+ /*
+ * INSN_F_STACK_ARG_ACCESS uses INSN_F_STACK_ACCESS | INSN_F_DST_REG_STACK.
+ * This is safe because INSN_F_DST_REG_STACK is only used for JMP insns
+ * while INSN_F_STACK_ACCESS is only used for ST/STX/LDX insns — they
+ * never appear on the same instruction.
+ */
+ INSN_F_STACK_ARG_ACCESS = BIT(9) | BIT(10),
INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
@@ -775,6 +782,7 @@ struct backtrack_state {
u32 frame;
u32 reg_masks[MAX_CALL_FRAMES];
u64 stack_masks[MAX_CALL_FRAMES];
+ u8 stack_arg_masks[MAX_CALL_FRAMES];
};
struct bpf_id_pair {
@@ -1173,6 +1181,11 @@ static inline void bpf_bt_set_frame_slot(struct backtrack_state *bt, u32 frame,
bt->stack_masks[frame] |= 1ull << slot;
}
+static inline void bt_set_frame_stack_arg_slot(struct backtrack_state *bt, u32 frame, u32 slot)
+{
+ bt->stack_arg_masks[frame] |= 1 << slot;
+}
+
static inline bool bt_is_frame_reg_set(struct backtrack_state *bt, u32 frame, u32 reg)
{
return bt->reg_masks[frame] & (1 << reg);
diff --git a/kernel/bpf/backtrack.c b/kernel/bpf/backtrack.c
index 854731dc93fe..73da7eaac47f 100644
--- a/kernel/bpf/backtrack.c
+++ b/kernel/bpf/backtrack.c
@@ -135,11 +135,21 @@ static inline u32 bt_empty(struct backtrack_state *bt)
int i;
for (i = 0; i <= bt->frame; i++)
- mask |= bt->reg_masks[i] | bt->stack_masks[i];
+ mask |= bt->reg_masks[i] | bt->stack_masks[i] | bt->stack_arg_masks[i];
return mask == 0;
}
+static inline void bt_clear_frame_stack_arg_slot(struct backtrack_state *bt, u32 frame, u32 slot)
+{
+ bt->stack_arg_masks[frame] &= ~(1 << slot);
+}
+
+static inline bool bt_is_frame_stack_arg_slot_set(struct backtrack_state *bt, u32 frame, u32 slot)
+{
+ return bt->stack_arg_masks[frame] & (1 << slot);
+}
+
static inline int bt_subprog_enter(struct backtrack_state *bt)
{
if (bt->frame == MAX_CALL_FRAMES - 1) {
@@ -200,6 +210,11 @@ static inline u64 bt_stack_mask(struct backtrack_state *bt)
return bt->stack_masks[bt->frame];
}
+static inline u8 bt_stack_arg_mask(struct backtrack_state *bt)
+{
+ return bt->stack_arg_masks[bt->frame];
+}
+
static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
{
return bt->reg_masks[bt->frame] & (1 << reg);
@@ -341,6 +356,13 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
return 0;
bt_clear_reg(bt, load_reg);
+ if (hist && (hist->flags & INSN_F_STACK_ARG_ACCESS) == INSN_F_STACK_ARG_ACCESS) {
+ spi = insn_stack_access_spi(hist->flags);
+ fr = insn_stack_access_frameno(hist->flags);
+ bt_set_frame_stack_arg_slot(bt, fr, spi);
+ return 0;
+ }
+
/* scalars can only be spilled into stack w/o losing precision.
* Load from any other memory can be zero extended.
* The desire to keep that precision is already indicated
@@ -363,6 +385,18 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
* encountered a case of pointer subtraction.
*/
return -ENOTSUPP;
+
+ if (hist && (hist->flags & INSN_F_STACK_ARG_ACCESS) == INSN_F_STACK_ARG_ACCESS) {
+ spi = insn_stack_access_spi(hist->flags);
+ fr = insn_stack_access_frameno(hist->flags);
+ if (!bt_is_frame_stack_arg_slot_set(bt, fr, spi))
+ return 0;
+ bt_clear_frame_stack_arg_slot(bt, fr, spi);
+ if (class == BPF_STX)
+ bt_set_reg(bt, sreg);
+ return 0;
+ }
+
/* scalars can only be spilled into stack */
if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
return 0;
@@ -431,6 +465,17 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
bpf_bt_set_frame_reg(bt, bt->frame - 1, i);
}
}
+ /* propagate callee's incoming stack arg precision
+ * to caller's outgoing stack arg slots
+ */
+ if (bt_stack_arg_mask(bt)) {
+ for (i = 0; i < MAX_BPF_FUNC_ARGS - MAX_BPF_FUNC_REG_ARGS; i++) {
+ if (!bt_is_frame_stack_arg_slot_set(bt, bt->frame, i))
+ continue;
+ bt_clear_frame_stack_arg_slot(bt, bt->frame, i);
+ bt_set_frame_stack_arg_slot(bt, bt->frame - 1, i);
+ }
+ }
if (bt_subprog_exit(bt))
return -EFAULT;
return 0;
@@ -453,9 +498,10 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
bt_stack_mask(bt));
return -EFAULT;
}
- /* clear r1-r5 in callback subprog's mask */
+ /* clear r1-r5 and stack arg slots in callback subprog's mask */
for (i = BPF_REG_1; i <= BPF_REG_5; i++)
bt_clear_reg(bt, i);
+ bt->stack_arg_masks[bt->frame] = 0;
if (bt_subprog_exit(bt))
return -EFAULT;
return 0;
@@ -901,6 +947,17 @@ int bpf_mark_chain_precision(struct bpf_verifier_env *env,
*changed = true;
}
}
+ for (i = 0; i < func->out_stack_arg_depth / BPF_REG_SIZE; i++) {
+ if (!bt_is_frame_stack_arg_slot_set(bt, fr, i))
+ continue;
+ reg = &func->stack_arg_regs[i];
+ if (reg->type != SCALAR_VALUE || reg->precise) {
+ bt_clear_frame_stack_arg_slot(bt, fr, i);
+ } else {
+ reg->precise = true;
+ *changed = true;
+ }
+ }
if (env->log.level & BPF_LOG_LEVEL2) {
fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
bt_frame_reg_mask(bt, fr));
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index bcf81692a22b..e041c182c614 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3563,6 +3563,11 @@ static int insn_stack_access_flags(int frameno, int spi)
return INSN_F_STACK_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno;
}
+static int insn_stack_arg_access_flags(int frameno, int spi)
+{
+ return INSN_F_STACK_ARG_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno;
+}
+
static void mark_indirect_target(struct bpf_verifier_env *env, int idx)
{
env->insn_aux_data[idx].indirect_target = true;
@@ -4484,7 +4489,8 @@ static int check_stack_arg_write(struct bpf_verifier_env *env, struct bpf_func_s
__mark_reg_known(arg, env->prog->insnsi[env->insn_idx].imm);
}
state->no_stack_arg_load = true;
- return 0;
+ return bpf_push_jmp_history(env, env->cur_state,
+ insn_stack_arg_access_flags(state->frameno, spi), 0);
}
/*
@@ -4519,7 +4525,17 @@ static int check_stack_arg_read(struct bpf_verifier_env *env, struct bpf_func_st
copy_register_state(&cur->regs[dst_regno], arg);
else
mark_reg_unknown(env, cur->regs, dst_regno);
- return 0;
+ return bpf_push_jmp_history(env, env->cur_state,
+ insn_stack_arg_access_flags(state->frameno, spi), 0);
+}
+
+static int mark_stack_arg_precision(struct bpf_verifier_env *env, int arg_idx)
+{
+ struct bpf_func_state *caller = cur_func(env);
+ int spi = arg_idx - MAX_BPF_FUNC_REG_ARGS;
+
+ bt_set_frame_stack_arg_slot(&env->bt, caller->frameno, spi);
+ return mark_chain_precision_batch(env, env->cur_state);
}
static int check_outgoing_stack_args(struct bpf_verifier_env *env, struct bpf_func_state *caller,
@@ -7269,8 +7285,14 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
}
err = check_helper_mem_access(env, mem_reg, mem_argno, size_reg->umax_value,
access_type, zero_size_allowed, meta);
- if (!err)
- err = mark_chain_precision(env, reg_from_argno(size_argno));
+ if (!err) {
+ int regno = reg_from_argno(size_argno);
+
+ if (regno >= 0)
+ err = mark_chain_precision(env, regno);
+ else
+ err = mark_stack_arg_precision(env, arg_from_argno(size_argno) - 1);
+ }
return err;
}
--
2.52.0
next prev parent reply other threads:[~2026-04-24 17:14 UTC|newest]
Thread overview: 46+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-24 17:14 [PATCH bpf-next 00/18] bpf: Support stack arguments for BPF functions and kfuncs Yonghong Song
2026-04-24 17:14 ` [PATCH bpf-next 01/18] bpf: Support stack arguments for bpf functions Yonghong Song
2026-04-24 18:13 ` bot+bpf-ci
2026-04-25 5:09 ` Yonghong Song
2026-04-27 20:40 ` Yonghong Song
2026-04-28 14:29 ` Eduard Zingerman
2026-04-28 16:47 ` Yonghong Song
2026-04-28 23:50 ` Yonghong Song
2026-04-29 0:28 ` Eduard Zingerman
2026-04-24 17:14 ` Yonghong Song [this message]
2026-04-24 18:00 ` [PATCH bpf-next 02/18] bpf: Add precision marking and backtracking for stack argument slots bot+bpf-ci
2026-04-25 5:10 ` Yonghong Song
2026-04-28 16:46 ` Eduard Zingerman
2026-04-28 20:54 ` Yonghong Song
2026-04-24 17:14 ` [PATCH bpf-next 03/18] bpf: Refactor record_call_access() to extract per-arg logic Yonghong Song
2026-04-29 0:51 ` Eduard Zingerman
2026-04-24 17:14 ` [PATCH bpf-next 04/18] bpf: Extend liveness analysis to track stack argument slots Yonghong Song
2026-04-24 18:00 ` bot+bpf-ci
2026-04-25 5:11 ` Yonghong Song
2026-04-24 17:14 ` [PATCH bpf-next 05/18] bpf: Reject stack arguments in non-JITed programs Yonghong Song
2026-04-24 18:00 ` bot+bpf-ci
2026-04-24 17:15 ` [PATCH bpf-next 06/18] bpf: Prepare architecture JIT support for stack arguments Yonghong Song
2026-04-24 17:48 ` bot+bpf-ci
2026-04-25 5:17 ` Yonghong Song
2026-04-24 17:15 ` [PATCH bpf-next 07/18] bpf: Enable r11 based insns Yonghong Song
2026-04-24 17:15 ` [PATCH bpf-next 08/18] bpf: Support stack arguments for kfunc calls Yonghong Song
2026-04-24 18:00 ` bot+bpf-ci
2026-04-25 5:19 ` Yonghong Song
2026-04-24 17:15 ` [PATCH bpf-next 09/18] bpf: Reject stack arguments if tail call reachable Yonghong Song
2026-04-24 18:00 ` bot+bpf-ci
2026-04-24 17:15 ` [PATCH bpf-next 10/18] bpf,x86: Implement JIT support for stack arguments Yonghong Song
2026-04-24 18:00 ` bot+bpf-ci
2026-04-25 5:29 ` Yonghong Song
2026-04-24 17:16 ` [PATCH bpf-next 11/18] selftests/bpf: Add tests for BPF function " Yonghong Song
2026-04-24 17:16 ` [PATCH bpf-next 12/18] selftests/bpf: Add tests for stack argument validation Yonghong Song
2026-04-24 17:17 ` [PATCH bpf-next 13/18] selftests/bpf: Add verifier " Yonghong Song
2026-04-24 17:48 ` bot+bpf-ci
2026-04-25 5:33 ` Yonghong Song
2026-04-24 17:17 ` [PATCH bpf-next 14/18] selftests/bpf: Add BTF fixup for __naked subprog parameter names Yonghong Song
2026-04-24 17:17 ` [PATCH bpf-next 15/18] selftests/bpf: Add precision backtracking test for stack arguments Yonghong Song
2026-04-24 17:17 ` [PATCH bpf-next 16/18] bpf, arm64: Map BPF_REG_0 to x8 instead of x7 Yonghong Song
2026-04-24 17:17 ` [PATCH bpf-next 17/18] bpf, arm64: Add JIT support for stack arguments Yonghong Song
2026-04-24 18:00 ` bot+bpf-ci
2026-04-27 9:06 ` Puranjay Mohan
2026-04-27 20:42 ` Yonghong Song
2026-04-24 17:17 ` [PATCH bpf-next 18/18] selftests/bpf: Enable stack argument tests for arm64 Yonghong Song
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260424171443.2034958-1-yonghong.song@linux.dev \
--to=yonghong.song@linux.dev \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=jose.marchesi@oracle.com \
--cc=kernel-team@fb.com \
--cc=martin.lau@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox