From: Eduard Zingerman <eddyz87@gmail.com>
To: bpf@vger.kernel.org, ast@kernel.org, andrii@kernel.org
Cc: daniel@iogearbox.net, martin.lau@linux.dev, kernel-team@fb.com,
yonghong.song@linux.dev, eddyz87@gmail.com
Subject: [PATCH bpf-next 14/14] bpf: poison dead stack slots
Date: Wed, 8 Apr 2026 18:33:16 -0700 [thread overview]
Message-ID: <20260408-patch-set-v1-14-1a666e860d42@gmail.com> (raw)
In-Reply-To: <20260408-patch-set-v1-0-1a666e860d42@gmail.com>
From: Alexei Starovoitov <ast@kernel.org>
As a sanity check, poison stack slots that stack liveness analysis determined
to be dead, so that any read from such slots will cause program rejection.
If the stack liveness logic is incorrect, the poison can cause a
valid program to be rejected, but it will also prevent an unsafe program
from being accepted.
Allow global subprogs to "read" poisoned stack slots.
The static stack liveness analysis determined that the subprog doesn't read
certain stack slots, but sizeof(arg_type) based global subprog validation
isn't accurate enough to know which slots will actually be read by
the callee, so it needs to check the full sizeof(arg_type) at the caller.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
include/linux/bpf_verifier.h | 1 +
kernel/bpf/log.c | 5 +-
kernel/bpf/verifier.c | 80 ++++++++++++++++------
.../selftests/bpf/progs/verifier_spill_fill.c | 2 +
4 files changed, 64 insertions(+), 24 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 75f3ae48431cbb675d76af3d899064cafed2f9ee..1f9ee77aedd7e6747e46c973606d15584a984075 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -220,6 +220,7 @@ enum bpf_stack_slot_type {
STACK_DYNPTR,
STACK_ITER,
STACK_IRQ_FLAG,
+ STACK_POISON,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
index f0902ecb7df69b6d43deaf808b4c0f00cc3438f2..d5779a3426d9773ff36595ff4df2bf070c096070 100644
--- a/kernel/bpf/log.c
+++ b/kernel/bpf/log.c
@@ -542,7 +542,8 @@ static char slot_type_char[] = {
[STACK_ZERO] = '0',
[STACK_DYNPTR] = 'd',
[STACK_ITER] = 'i',
- [STACK_IRQ_FLAG] = 'f'
+ [STACK_IRQ_FLAG] = 'f',
+ [STACK_POISON] = 'p',
};
#define UNUM_MAX_DECIMAL U16_MAX
@@ -779,7 +780,7 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifie
for (j = 0; j < BPF_REG_SIZE; j++) {
slot_type = state->stack[i].slot_type[j];
- if (slot_type != STACK_INVALID)
+ if (slot_type != STACK_INVALID && slot_type != STACK_POISON)
valid = true;
types_buf[j] = slot_type_char[slot_type];
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2bd8511aa1508a96c23e93df174f64708dafb4b5..e0219be1c0aadd132419c639686372d67d9c4864 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1327,6 +1327,7 @@ static bool is_stack_slot_special(const struct bpf_stack_state *stack)
case STACK_IRQ_FLAG:
return true;
case STACK_INVALID:
+ case STACK_POISON:
case STACK_MISC:
case STACK_ZERO:
return false;
@@ -1369,14 +1370,14 @@ static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype)
{
if (*stype == STACK_ZERO)
return;
- if (*stype == STACK_INVALID)
+ if (*stype == STACK_INVALID || *stype == STACK_POISON)
return;
*stype = STACK_MISC;
}
static void scrub_spilled_slot(u8 *stype)
{
- if (*stype != STACK_INVALID)
+ if (*stype != STACK_INVALID && *stype != STACK_POISON)
*stype = STACK_MISC;
}
@@ -5574,7 +5575,8 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
* that may or may not be written because, if we're reject
* them, the error would be too confusing.
*/
- if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
+ if ((*stype == STACK_INVALID || *stype == STACK_POISON) &&
+ !env->allow_uninit_stack) {
verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
insn_idx, i);
return -EINVAL;
@@ -5710,8 +5712,13 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
}
if (type == STACK_INVALID && env->allow_uninit_stack)
continue;
- verbose(env, "invalid read from stack off %d+%d size %d\n",
- off, i, size);
+ if (type == STACK_POISON) {
+ verbose(env, "reading from stack off %d+%d size %d, slot poisoned by dead code elimination\n",
+ off, i, size);
+ } else {
+ verbose(env, "invalid read from stack off %d+%d size %d\n",
+ off, i, size);
+ }
return -EACCES;
}
@@ -5760,8 +5767,13 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
continue;
if (type == STACK_INVALID && env->allow_uninit_stack)
continue;
- verbose(env, "invalid read from stack off %d+%d size %d\n",
- off, i, size);
+ if (type == STACK_POISON) {
+ verbose(env, "reading from stack off %d+%d size %d, slot poisoned by dead code elimination\n",
+ off, i, size);
+ } else {
+ verbose(env, "invalid read from stack off %d+%d size %d\n",
+ off, i, size);
+ }
return -EACCES;
}
if (dst_regno >= 0)
@@ -8326,16 +8338,22 @@ static int check_stack_range_initialized(
/* Some accesses can write anything into the stack, others are
* read-only.
*/
- bool clobber = false;
+ bool clobber = type == BPF_WRITE;
+ /*
+ * Negative access_size signals global subprog/kfunc arg check where
+ * STACK_POISON slots are acceptable. static stack liveness
+ * might have determined that subprog doesn't read them,
+ * but BTF based global subprog validation isn't accurate enough.
+ */
+ bool allow_poison = access_size < 0 || clobber;
+
+ access_size = abs(access_size);
if (access_size == 0 && !zero_size_allowed) {
verbose(env, "invalid zero-sized read\n");
return -EACCES;
}
- if (type == BPF_WRITE)
- clobber = true;
-
err = check_stack_access_within_bounds(env, regno, off, access_size, type);
if (err)
return err;
@@ -8434,7 +8452,12 @@ static int check_stack_range_initialized(
goto mark;
}
- if (tnum_is_const(reg->var_off)) {
+ if (*stype == STACK_POISON) {
+ if (allow_poison)
+ goto mark;
+ verbose(env, "reading from stack R%d off %d+%d size %d, slot poisoned by dead code elimination\n",
+ regno, min_off, i - min_off, access_size);
+ } else if (tnum_is_const(reg->var_off)) {
verbose(env, "invalid read from stack R%d off %d+%d size %d\n",
regno, min_off, i - min_off, access_size);
} else {
@@ -8617,8 +8640,10 @@ static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg
mark_ptr_not_null_reg(reg);
}
- err = check_helper_mem_access(env, regno, mem_size, BPF_READ, true, NULL);
- err = err ?: check_helper_mem_access(env, regno, mem_size, BPF_WRITE, true, NULL);
+ int size = base_type(reg->type) == PTR_TO_STACK ? -(int)mem_size : mem_size;
+
+ err = check_helper_mem_access(env, regno, size, BPF_READ, true, NULL);
+ err = err ?: check_helper_mem_access(env, regno, size, BPF_WRITE, true, NULL);
if (may_be_null)
*reg = saved_reg;
@@ -20078,7 +20103,7 @@ static void __clean_func_state(struct bpf_verifier_env *env,
__mark_reg_not_init(env, spill);
}
for (j = start; j < end; j++)
- st->stack[i].slot_type[j] = STACK_INVALID;
+ st->stack[i].slot_type[j] = STACK_POISON;
}
}
}
@@ -20407,7 +20432,8 @@ static bool is_stack_misc_after(struct bpf_verifier_env *env,
for (i = im; i < ARRAY_SIZE(stack->slot_type); ++i) {
if ((stack->slot_type[i] == STACK_MISC) ||
- (stack->slot_type[i] == STACK_INVALID && env->allow_uninit_stack))
+ ((stack->slot_type[i] == STACK_INVALID || stack->slot_type[i] == STACK_POISON) &&
+ env->allow_uninit_stack))
continue;
return false;
}
@@ -20443,13 +20469,22 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
spi = i / BPF_REG_SIZE;
- if (exact == EXACT &&
- (i >= cur->allocated_stack ||
- old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
- cur->stack[spi].slot_type[i % BPF_REG_SIZE]))
- return false;
+ if (exact == EXACT) {
+ u8 old_type = old->stack[spi].slot_type[i % BPF_REG_SIZE];
+ u8 cur_type = i < cur->allocated_stack ?
+ cur->stack[spi].slot_type[i % BPF_REG_SIZE] : STACK_INVALID;
+
+ /* STACK_INVALID and STACK_POISON are equivalent for pruning */
+ if (old_type == STACK_POISON)
+ old_type = STACK_INVALID;
+ if (cur_type == STACK_POISON)
+ cur_type = STACK_INVALID;
+ if (i >= cur->allocated_stack || old_type != cur_type)
+ return false;
+ }
- if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
+ if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID ||
+ old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_POISON)
continue;
if (env->allow_uninit_stack &&
@@ -20547,6 +20582,7 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
case STACK_MISC:
case STACK_ZERO:
case STACK_INVALID:
+ case STACK_POISON:
continue;
/* Ensure that new unhandled slot types return false by default */
default:
diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
index c6ae64b99cd6ef8af8dd8d341c06033bac0f61de..6bc721accbae001a3e16396dc69037071534e324 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -780,6 +780,8 @@ __naked void stack_load_preserves_const_precision_subreg(void)
"r1 += r2;"
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+ "r2 = *(u64 *)(r10 -8);" /* keep slots alive */
+ "r2 = *(u64 *)(r10 -16);"
"r0 = 0;"
"exit;"
:
--
2.53.0
prev parent reply other threads:[~2026-04-09 1:33 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-09 1:33 [PATCH bpf-next 00/14] bpf: static stack liveness data flow analysis Eduard Zingerman
2026-04-09 1:33 ` [PATCH bpf-next 01/14] bpf: share several utility functions as internal API Eduard Zingerman
2026-04-09 1:33 ` [PATCH bpf-next 02/14] bpf: save subprogram name in bpf_subprog_info Eduard Zingerman
2026-04-09 2:14 ` bot+bpf-ci
2026-04-09 1:33 ` [PATCH bpf-next 03/14] bpf: Add spis_*() helpers for 4-byte stack slot bitmasks Eduard Zingerman
2026-04-09 3:12 ` bot+bpf-ci
2026-04-09 1:33 ` [PATCH bpf-next 04/14] bpf: make liveness.c track stack with 4-byte granularity Eduard Zingerman
2026-04-09 2:26 ` bot+bpf-ci
2026-04-09 1:33 ` [PATCH bpf-next 05/14] bpf: 4-byte precise clean_verifier_state Eduard Zingerman
2026-04-09 1:33 ` [PATCH bpf-next 06/14] bpf: prepare bpf_liveness api for use by static analysis pass Eduard Zingerman
2026-04-09 1:33 ` [PATCH bpf-next 07/14] bpf: introduce forward arg-tracking dataflow analysis Eduard Zingerman
2026-04-09 2:26 ` bot+bpf-ci
2026-04-09 1:33 ` [PATCH bpf-next 08/14] bpf: record arg tracking results in bpf_liveness masks Eduard Zingerman
2026-04-09 2:26 ` bot+bpf-ci
2026-04-09 1:33 ` [PATCH bpf-next 09/14] bpf: simplify liveness to use (callsite, depth) keyed func_instances Eduard Zingerman
2026-04-09 2:26 ` bot+bpf-ci
2026-04-09 1:33 ` [PATCH bpf-next 10/14] bpf: change logging scheme for live stack analysis Eduard Zingerman
2026-04-09 2:14 ` bot+bpf-ci
2026-04-09 1:33 ` [PATCH bpf-next 11/14] selftests/bpf: update existing tests due to liveness changes Eduard Zingerman
2026-04-09 1:33 ` [PATCH bpf-next 12/14] selftests/bpf: adjust verifier_log buffers Eduard Zingerman
2026-04-09 1:33 ` [PATCH bpf-next 13/14] selftests/bpf: add new tests for static stack liveness analysis Eduard Zingerman
2026-04-09 1:33 ` Eduard Zingerman [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260408-patch-set-v1-14-1a666e860d42@gmail.com \
--to=eddyz87@gmail.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=kernel-team@fb.com \
--cc=martin.lau@linux.dev \
--cc=yonghong.song@linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox