From: Yonghong Song <yonghong.song@linux.dev>
To: bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
Andrii Nakryiko <andrii@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
"Jose E . Marchesi" <jose.marchesi@oracle.com>,
kernel-team@fb.com, Martin KaFai Lau <martin.lau@kernel.org>
Subject: [PATCH bpf-next v2 06/23] bpf: Refactor jmp history to use dedicated spi/frame fields
Date: Thu, 7 May 2026 14:30:13 -0700 [thread overview]
Message-ID: <20260507213013.1128023-1-yonghong.song@linux.dev> (raw)
In-Reply-To: <20260507212942.1122000-1-yonghong.song@linux.dev>
Move stack slot index (spi) and frame number out of the flags field
in bpf_jmp_history_entry into dedicated bitfields. This simplifies
the encoding and makes room for new flags.
Previously, spi and frame were packed into the lower 9 bits of the
12-bit flags field (3 bits frame + 6 bits spi), with INSN_F_STACK_ACCESS
at BIT(9) and INSN_F_DST/SRC_REG_STACK at BIT(10)/BIT(11).
But this has no room for an INSN_F_* flag for stack arguments.
To resolve this issue, bpf_jmp_history_entry field idx is narrowed to
20 bits (sufficient for insn indices up to 1M), and the freed bits hold
spi (6 bits) and frame (3 bits) as dedicated struct fields. The flags
enum is simplified accordingly:
INSN_F_STACK_ACCESS -> BIT(0)
INSN_F_DST_REG_STACK -> BIT(1)
INSN_F_SRC_REG_STACK -> BIT(2)
which allows more room for additional INSN_F_* flags.
bpf_push_jmp_history() now takes explicit spi and frame parameters
instead of encoding them into flags. The insn_stack_access_flags(),
insn_stack_access_spi(), and insn_stack_access_frameno() helpers are
removed.
No functional change.
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
---
include/linux/bpf_verifier.h | 34 ++++++++++++++--------------------
kernel/bpf/backtrack.c | 23 +++++++++--------------
kernel/bpf/states.c | 2 +-
kernel/bpf/verifier.c | 22 +++++++++++-----------
4 files changed, 35 insertions(+), 46 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1fba16fced28..2d5c8f36f451 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -435,31 +435,22 @@ struct bpf_func_state {
#define MAX_CALL_FRAMES 8
-/* instruction history flags, used in bpf_jmp_history_entry.flags field */
+/* instruction history flags, used in bpf_jmp_history_entry.flags field.
+ * Frame number and SPI are stored in dedicated fields of bpf_jmp_history_entry.
+ */
enum {
- /* instruction references stack slot through PTR_TO_STACK register;
- * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
- * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
- * 8 bytes per slot, so slot index (spi) is [0, 63])
- */
- INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */
-
- INSN_F_SPI_MASK = 0x3f, /* 6 bits */
- INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
+ INSN_F_STACK_ACCESS = BIT(0),
- INSN_F_STACK_ACCESS = BIT(9),
-
- INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
- INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
- /* total 12 bits are used now. */
+ INSN_F_DST_REG_STACK = BIT(1), /* dst_reg is PTR_TO_STACK */
+ INSN_F_SRC_REG_STACK = BIT(2), /* src_reg is PTR_TO_STACK */
};
-static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
-static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
-
struct bpf_jmp_history_entry {
- u32 idx;
/* insn idx can't be bigger than 1 million */
+ u32 idx : 20;
+ u32 frame : 3; /* stack access frame number */
+ u32 spi : 6; /* stack slot index (0..63) */
+ u32 : 3;
u32 prev_idx : 20;
/* special INSN_F_xxx flags */
u32 flags : 12;
@@ -469,6 +460,9 @@ struct bpf_jmp_history_entry {
u64 linked_regs;
};
+static_assert(MAX_CALL_FRAMES <= (1 << 3));
+static_assert(MAX_BPF_STACK / 8 <= (1 << 6));
+
/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
struct bpf_verifier_state {
@@ -1180,7 +1174,7 @@ struct list_head *bpf_explored_state(struct bpf_verifier_env *env, int idx);
void bpf_free_verifier_state(struct bpf_verifier_state *state, bool free_self);
void bpf_free_backedges(struct bpf_scc_visit *visit);
int bpf_push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
- int insn_flags, u64 linked_regs);
+ int insn_flags, int spi, int frame, u64 linked_regs);
void bpf_bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_jmp_history_entry *hist);
void bpf_mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg);
diff --git a/kernel/bpf/backtrack.c b/kernel/bpf/backtrack.c
index 854731dc93fe..93738a076c88 100644
--- a/kernel/bpf/backtrack.c
+++ b/kernel/bpf/backtrack.c
@@ -9,7 +9,7 @@
/* for any branch, call, exit record the history of jmps in the given state */
int bpf_push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
- int insn_flags, u64 linked_regs)
+ int insn_flags, int spi, int frame, u64 linked_regs)
{
u32 cnt = cur->jmp_history_cnt;
struct bpf_jmp_history_entry *p;
@@ -25,6 +25,8 @@ int bpf_push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state
env, "insn history: insn_idx %d cur flags %x new flags %x",
env->insn_idx, env->cur_hist_ent->flags, insn_flags);
env->cur_hist_ent->flags |= insn_flags;
+ env->cur_hist_ent->spi = spi;
+ env->cur_hist_ent->frame = frame;
verifier_bug_if(env->cur_hist_ent->linked_regs != 0, env,
"insn history: insn_idx %d linked_regs: %#llx",
env->insn_idx, env->cur_hist_ent->linked_regs);
@@ -43,6 +45,8 @@ int bpf_push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state
p->idx = env->insn_idx;
p->prev_idx = env->prev_insn_idx;
p->flags = insn_flags;
+ p->spi = spi;
+ p->frame = frame;
p->linked_regs = linked_regs;
cur->jmp_history_cnt = cnt;
env->cur_hist_ent = p;
@@ -64,15 +68,6 @@ static bool is_atomic_fetch_insn(const struct bpf_insn *insn)
(insn->imm & BPF_FETCH);
}
-static int insn_stack_access_spi(int insn_flags)
-{
- return (insn_flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
-}
-
-static int insn_stack_access_frameno(int insn_flags)
-{
- return insn_flags & INSN_F_FRAMENO_MASK;
-}
/* Backtrack one insn at a time. If idx is not at the top of recorded
* history then previous instruction came from straight line execution.
@@ -353,8 +348,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
* that [fp - off] slot contains scalar that needs to be
* tracked with precision
*/
- spi = insn_stack_access_spi(hist->flags);
- fr = insn_stack_access_frameno(hist->flags);
+ spi = hist->spi;
+ fr = hist->frame;
bpf_bt_set_frame_slot(bt, fr, spi);
} else if (class == BPF_STX || class == BPF_ST) {
if (bt_is_reg_set(bt, dreg))
@@ -366,8 +361,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
/* scalars can only be spilled into stack */
if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
return 0;
- spi = insn_stack_access_spi(hist->flags);
- fr = insn_stack_access_frameno(hist->flags);
+ spi = hist->spi;
+ fr = hist->frame;
if (!bt_is_frame_slot_set(bt, fr, spi))
return 0;
bt_clear_frame_slot(bt, fr, spi);
diff --git a/kernel/bpf/states.c b/kernel/bpf/states.c
index c249eb40c6d6..45d86bfe3b68 100644
--- a/kernel/bpf/states.c
+++ b/kernel/bpf/states.c
@@ -1400,7 +1400,7 @@ int bpf_is_state_visited(struct bpf_verifier_env *env, int insn_idx)
*/
err = 0;
if (bpf_is_jmp_point(env, env->insn_idx))
- err = bpf_push_jmp_history(env, cur, 0, 0);
+ err = bpf_push_jmp_history(env, cur, 0, 0, 0, 0);
err = err ? : propagate_precision(env, &sl->state, cur, NULL);
if (err)
return err;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 61d6663498f7..010efdb766ac 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3198,10 +3198,6 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
return __check_reg_arg(env, state->regs, regno, t);
}
-static int insn_stack_access_flags(int frameno, int spi)
-{
- return INSN_F_STACK_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno;
-}
static void mark_indirect_target(struct bpf_verifier_env *env, int idx)
{
@@ -3517,7 +3513,8 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
struct bpf_reg_state *reg = NULL;
- int insn_flags = insn_stack_access_flags(state->frameno, spi);
+ int insn_flags = INSN_F_STACK_ACCESS;
+ int hist_spi = spi, hist_frame = state->frameno;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
* so it's aligned access and [off, off + size) are within stack limits
@@ -3613,7 +3610,8 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
}
if (insn_flags)
- return bpf_push_jmp_history(env, env->cur_state, insn_flags, 0);
+ return bpf_push_jmp_history(env, env->cur_state, insn_flags,
+ hist_spi, hist_frame, 0);
return 0;
}
@@ -3809,7 +3807,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
struct bpf_reg_state *reg;
u8 *stype, type;
- int insn_flags = insn_stack_access_flags(reg_state->frameno, spi);
+ int insn_flags = INSN_F_STACK_ACCESS;
+ int hist_spi = spi, hist_frame = reg_state->frameno;
stype = reg_state->stack[spi].slot_type;
reg = &reg_state->stack[spi].spilled_ptr;
@@ -3940,7 +3939,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
insn_flags = 0; /* we are not restoring spilled register */
}
if (insn_flags)
- return bpf_push_jmp_history(env, env->cur_state, insn_flags, 0);
+ return bpf_push_jmp_history(env, env->cur_state, insn_flags,
+ hist_spi, hist_frame, 0);
return 0;
}
@@ -15905,7 +15905,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
}
if (insn_flags) {
- err = bpf_push_jmp_history(env, this_branch, insn_flags, 0);
+ err = bpf_push_jmp_history(env, this_branch, insn_flags, 0, 0, 0);
if (err)
return err;
}
@@ -15969,7 +15969,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (dst_reg->type == SCALAR_VALUE && dst_reg->id)
collect_linked_regs(env, this_branch, dst_reg->id, &linked_regs);
if (linked_regs.cnt > 1) {
- err = bpf_push_jmp_history(env, this_branch, 0, linked_regs_pack(&linked_regs));
+ err = bpf_push_jmp_history(env, this_branch, 0, 0, 0, linked_regs_pack(&linked_regs));
if (err)
return err;
}
@@ -17275,7 +17275,7 @@ static int do_check(struct bpf_verifier_env *env)
}
if (bpf_is_jmp_point(env, env->insn_idx)) {
- err = bpf_push_jmp_history(env, state, 0, 0);
+ err = bpf_push_jmp_history(env, state, 0, 0, 0, 0);
if (err)
return err;
}
--
2.53.0-Meta
next prev parent reply other threads:[~2026-05-07 21:30 UTC|newest]
Thread overview: 68+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-07 21:29 [PATCH bpf-next v2 00/23] bpf: Support stack arguments for BPF functions and kfuncs Yonghong Song
2026-05-07 21:29 ` [PATCH bpf-next v2 01/23] bpf: Convert bpf_get_spilled_reg macro to static inline function Yonghong Song
2026-05-07 21:29 ` [PATCH bpf-next v2 02/23] bpf: Remove copy_register_state wrapper function Yonghong Song
2026-05-07 21:29 ` [PATCH bpf-next v2 03/23] bpf: Add helper functions for r11-based stack argument insns Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 04/23] bpf: Set sub->arg_cnt earlier in btf_prepare_func_args() Yonghong Song
2026-05-07 22:11 ` bot+bpf-ci
2026-05-09 13:05 ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 05/23] bpf: Support stack arguments for bpf functions Yonghong Song
2026-05-07 22:26 ` bot+bpf-ci
2026-05-09 12:52 ` Yonghong Song
2026-05-08 18:00 ` Alexei Starovoitov
2026-05-09 12:55 ` Yonghong Song
2026-05-07 21:30 ` Yonghong Song [this message]
2026-05-07 21:30 ` [PATCH bpf-next v2 07/23] bpf: Add precision marking and backtracking for stack argument slots Yonghong Song
2026-05-07 22:11 ` bot+bpf-ci
2026-05-09 13:08 ` Yonghong Song
2026-05-09 4:05 ` sashiko-bot
2026-05-10 16:41 ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 08/23] bpf: Refactor record_call_access() to extract per-arg logic Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 09/23] bpf: Extend liveness analysis to track stack argument slots Yonghong Song
2026-05-07 22:11 ` bot+bpf-ci
2026-05-09 13:29 ` Yonghong Song
2026-05-09 0:59 ` sashiko-bot
2026-05-10 16:47 ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 10/23] bpf: Reject stack arguments in non-JITed programs Yonghong Song
2026-05-07 22:11 ` bot+bpf-ci
2026-05-09 2:10 ` sashiko-bot
2026-05-10 16:59 ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 11/23] bpf: Prepare architecture JIT support for stack arguments Yonghong Song
2026-05-09 2:19 ` sashiko-bot
2026-05-10 17:05 ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 12/23] bpf: Enable r11 based insns Yonghong Song
2026-05-09 2:59 ` sashiko-bot
2026-05-10 17:11 ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 13/23] bpf: Support stack arguments for kfunc calls Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 14/23] bpf: Reject stack arguments if tail call reachable Yonghong Song
2026-05-07 22:11 ` bot+bpf-ci
2026-05-09 1:42 ` sashiko-bot
2026-05-10 17:15 ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 15/23] bpf,x86: Implement JIT support for stack arguments Yonghong Song
2026-05-07 22:26 ` bot+bpf-ci
2026-05-10 17:21 ` Yonghong Song
2026-05-09 2:21 ` sashiko-bot
2026-05-10 17:22 ` Yonghong Song
2026-05-07 21:31 ` [PATCH bpf-next v2 16/23] selftests/bpf: Add tests for BPF function " Yonghong Song
2026-05-07 21:31 ` [PATCH bpf-next v2 17/23] selftests/bpf: Add tests for stack argument validation Yonghong Song
2026-05-09 1:30 ` sashiko-bot
2026-05-10 17:23 ` Yonghong Song
2026-05-07 21:31 ` [PATCH bpf-next v2 18/23] selftests/bpf: Add BTF fixup for __naked subprog parameter names Yonghong Song
2026-05-09 1:40 ` sashiko-bot
2026-05-10 17:24 ` Yonghong Song
2026-05-07 21:31 ` [PATCH bpf-next v2 19/23] selftests/bpf: Add verifier tests for stack argument validation Yonghong Song
2026-05-07 22:11 ` bot+bpf-ci
2026-05-10 17:27 ` Yonghong Song
2026-05-09 1:38 ` sashiko-bot
2026-05-10 17:27 ` Yonghong Song
2026-05-07 21:31 ` [PATCH bpf-next v2 20/23] selftests/bpf: Add precision backtracking test for stack arguments Yonghong Song
2026-05-09 1:52 ` sashiko-bot
2026-05-10 17:31 ` Yonghong Song
2026-05-07 21:31 ` [PATCH bpf-next v2 21/23] bpf, arm64: Map BPF_REG_0 to x8 instead of x7 Yonghong Song
2026-05-08 18:01 ` Alexei Starovoitov
2026-05-09 13:44 ` Yonghong Song
2026-05-07 21:32 ` [PATCH bpf-next v2 22/23] bpf, arm64: Add JIT support for stack arguments Yonghong Song
2026-05-09 2:15 ` sashiko-bot
2026-05-10 17:32 ` Yonghong Song
2026-05-07 21:32 ` [PATCH bpf-next v2 23/23] selftests/bpf: Enable stack argument tests for arm64 Yonghong Song
2026-05-08 18:06 ` [PATCH bpf-next v2 00/23] bpf: Support stack arguments for BPF functions and kfuncs Alexei Starovoitov
2026-05-09 13:43 ` Yonghong Song
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260507213013.1128023-1-yonghong.song@linux.dev \
--to=yonghong.song@linux.dev \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=jose.marchesi@oracle.com \
--cc=kernel-team@fb.com \
--cc=martin.lau@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox