From: Yonghong Song <yonghong.song@linux.dev>
To: bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
Andrii Nakryiko <andrii@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
"Jose E . Marchesi" <jose.marchesi@oracle.com>,
kernel-team@fb.com, Martin KaFai Lau <martin.lau@kernel.org>
Subject: [PATCH bpf-next v6 03/17] bpf: Refactor to handle memory and size together
Date: Sun, 19 Apr 2026 09:33:31 -0700 [thread overview]
Message-ID: <20260419163331.733278-1-yonghong.song@linux.dev> (raw)
In-Reply-To: <20260419163316.731019-1-yonghong.song@linux.dev>
Similar to the previous patch, try to pass bpf_reg_state from caller
to callee. Both mem_reg and size_reg are passed to helper functions.
This is important for stack arguments as they may be beyond registers 1-5.
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
---
kernel/bpf/verifier.c | 59 ++++++++++++++++++++++---------------------
1 file changed, 30 insertions(+), 29 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 514fee971f96..3716d9688d00 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6940,12 +6940,12 @@ static int check_stack_range_initialized(
return 0;
}
-static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+static int check_helper_mem_access(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int regno,
int access_size, enum bpf_access_type access_type,
bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
- struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
+ struct bpf_reg_state *regs = cur_regs(env);
u32 *max_access;
switch (base_type(reg->type)) {
@@ -7028,15 +7028,17 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
/* verify arguments to helpers or kfuncs consisting of a pointer and an access
* size.
*
- * @regno is the register containing the access size. regno-1 is the register
- * containing the pointer.
+ * @mem_regno is the register containing the pointer, mem_regno+1 is the register
+ * containing the access size.
*/
static int check_mem_size_reg(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg, u32 regno,
+ struct bpf_reg_state *mem_reg,
+ struct bpf_reg_state *size_reg, u32 mem_regno,
enum bpf_access_type access_type,
bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
+ int size_regno = mem_regno + 1;
int err;
/* This is used to refine r0 return value bounds for helpers
@@ -7047,37 +7049,37 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
* out. Only upper bounds can be learned because retval is an
* int type and negative retvals are allowed.
*/
- meta->msize_max_value = reg->umax_value;
+ meta->msize_max_value = size_reg->umax_value;
/* The register is SCALAR_VALUE; the access check happens using
* its boundaries. For unprivileged variable accesses, disable
* raw mode so that the program is required to initialize all
* the memory that the helper could just partially fill up.
*/
- if (!tnum_is_const(reg->var_off))
+ if (!tnum_is_const(size_reg->var_off))
meta = NULL;
- if (reg->smin_value < 0) {
+ if (size_reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
- regno);
+ size_regno);
return -EACCES;
}
- if (reg->umin_value == 0 && !zero_size_allowed) {
+ if (size_reg->umin_value == 0 && !zero_size_allowed) {
verbose(env, "R%d invalid zero-sized read: u64=[%lld,%lld]\n",
- regno, reg->umin_value, reg->umax_value);
+ size_regno, size_reg->umin_value, size_reg->umax_value);
return -EACCES;
}
- if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
+ if (size_reg->umax_value >= BPF_MAX_VAR_SIZ) {
verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
- regno);
+ size_regno);
return -EACCES;
}
- err = check_helper_mem_access(env, regno - 1, reg->umax_value,
+ err = check_helper_mem_access(env, mem_reg, mem_regno, size_reg->umax_value,
access_type, zero_size_allowed, meta);
if (!err)
- err = mark_chain_precision(env, regno);
+ err = mark_chain_precision(env, size_regno);
return err;
}
@@ -7102,8 +7104,8 @@ static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg
int size = base_type(reg->type) == PTR_TO_STACK ? -(int)mem_size : mem_size;
- err = check_helper_mem_access(env, regno, size, BPF_READ, true, NULL);
- err = err ?: check_helper_mem_access(env, regno, size, BPF_WRITE, true, NULL);
+ err = check_helper_mem_access(env, reg, regno, size, BPF_READ, true, NULL);
+ err = err ?: check_helper_mem_access(env, reg, regno, size, BPF_WRITE, true, NULL);
if (may_be_null)
*reg = saved_reg;
@@ -7111,16 +7113,15 @@ static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg
return err;
}
-static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- u32 regno)
+static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *mem_reg,
+ struct bpf_reg_state *size_reg, u32 mem_regno)
{
- struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
bool may_be_null = type_may_be_null(mem_reg->type);
struct bpf_reg_state saved_reg;
struct bpf_call_arg_meta meta;
int err;
- WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
+ WARN_ON_ONCE(mem_regno > BPF_REG_4);
memset(&meta, 0, sizeof(meta));
@@ -7129,8 +7130,8 @@ static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg
mark_ptr_not_null_reg(mem_reg);
}
- err = check_mem_size_reg(env, reg, regno, BPF_READ, true, &meta);
- err = err ?: check_mem_size_reg(env, reg, regno, BPF_WRITE, true, &meta);
+ err = check_mem_size_reg(env, mem_reg, size_reg, mem_regno, BPF_READ, true, &meta);
+ err = err ?: check_mem_size_reg(env, mem_reg, size_reg, mem_regno, BPF_WRITE, true, &meta);
if (may_be_null)
*mem_reg = saved_reg;
@@ -8594,7 +8595,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
return -EFAULT;
}
key_size = meta->map.ptr->key_size;
- err = check_helper_mem_access(env, regno, key_size, BPF_READ, false, NULL);
+ err = check_helper_mem_access(env, reg, regno, key_size, BPF_READ, false, NULL);
if (err)
return err;
if (can_elide_value_nullness(meta->map.ptr->map_type)) {
@@ -8621,7 +8622,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
return -EFAULT;
}
meta->raw_mode = arg_type & MEM_UNINIT;
- err = check_helper_mem_access(env, regno, meta->map.ptr->value_size,
+ err = check_helper_mem_access(env, reg, regno, meta->map.ptr->value_size,
arg_type & MEM_WRITE ? BPF_WRITE : BPF_READ,
false, meta);
break;
@@ -8665,7 +8666,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
*/
meta->raw_mode = arg_type & MEM_UNINIT;
if (arg_type & MEM_FIXED_SIZE) {
- err = check_helper_mem_access(env, regno, fn->arg_size[arg],
+ err = check_helper_mem_access(env, reg, regno, fn->arg_size[arg],
arg_type & MEM_WRITE ? BPF_WRITE : BPF_READ,
false, meta);
if (err)
@@ -8675,13 +8676,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
}
break;
case ARG_CONST_SIZE:
- err = check_mem_size_reg(env, reg, regno,
+ err = check_mem_size_reg(env, reg_state(env, regno - 1), reg, regno - 1,
fn->arg_type[arg - 1] & MEM_WRITE ?
BPF_WRITE : BPF_READ,
false, meta);
break;
case ARG_CONST_SIZE_OR_ZERO:
- err = check_mem_size_reg(env, reg, regno,
+ err = check_mem_size_reg(env, reg_state(env, regno - 1), reg, regno - 1,
fn->arg_type[arg - 1] & MEM_WRITE ?
BPF_WRITE : BPF_READ,
true, meta);
@@ -12415,7 +12416,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
const struct btf_param *size_arg = &args[i + 1];
if (!bpf_register_is_null(buff_reg) || !is_kfunc_arg_nullable(meta->btf, buff_arg)) {
- ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1);
+ ret = check_kfunc_mem_size_reg(env, buff_reg, size_reg, regno);
if (ret < 0) {
verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1);
return ret;
--
2.52.0
next prev parent reply other threads:[~2026-04-19 16:33 UTC|newest]
Thread overview: 51+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-19 16:33 [PATCH bpf-next v6 00/17] bpf: Support stack arguments for BPF functions and kfuncs Yonghong Song
2026-04-19 16:33 ` [PATCH bpf-next v6 01/17] bpf: Remove unused parameter from check_map_kptr_access() Yonghong Song
2026-04-19 16:33 ` [PATCH bpf-next v6 02/17] bpf: Refactor to avoid redundant calculation of bpf_reg_state Yonghong Song
2026-04-19 16:33 ` Yonghong Song [this message]
2026-04-20 23:58 ` [PATCH bpf-next v6 03/17] bpf: Refactor to handle memory and size together Alexei Starovoitov
2026-04-21 4:04 ` Yonghong Song
2026-04-19 16:33 ` [PATCH bpf-next v6 04/17] bpf: Prepare verifier logs for upcoming kfunc stack arguments Yonghong Song
2026-04-21 0:03 ` Alexei Starovoitov
2026-04-21 4:06 ` Yonghong Song
2026-04-21 6:07 ` Yonghong Song
2026-04-21 13:48 ` Alexei Starovoitov
2026-04-21 15:41 ` Yonghong Song
2026-04-21 15:46 ` Alexei Starovoitov
2026-04-21 16:37 ` Yonghong Song
2026-04-21 17:24 ` Yonghong Song
2026-04-19 16:33 ` [PATCH bpf-next v6 05/17] bpf: Introduce bpf register BPF_REG_PARAMS Yonghong Song
2026-04-19 17:06 ` sashiko-bot
2026-04-19 18:14 ` Yonghong Song
2026-04-19 16:33 ` [PATCH bpf-next v6 06/17] bpf: Reuse MAX_BPF_FUNC_ARGS for maximum number of arguments Yonghong Song
2026-04-19 16:33 ` [PATCH bpf-next v6 07/17] bpf: Support stack arguments for bpf functions Yonghong Song
2026-04-19 19:15 ` sashiko-bot
2026-04-20 4:35 ` Yonghong Song
2026-04-21 0:37 ` Alexei Starovoitov
2026-04-21 4:15 ` Yonghong Song
2026-04-19 16:33 ` [PATCH bpf-next v6 08/17] bpf: Reject stack arguments in non-JITed programs Yonghong Song
2026-04-19 18:21 ` sashiko-bot
2026-04-20 4:23 ` Yonghong Song
2026-04-19 16:34 ` [PATCH bpf-next v6 09/17] bpf: Track r11 registers in const_fold and liveness Yonghong Song
2026-04-19 16:34 ` [PATCH bpf-next v6 10/17] bpf: Prepare architecture JIT support for stack arguments Yonghong Song
2026-04-19 16:34 ` [PATCH bpf-next v6 11/17] bpf: Enable r11 based insns Yonghong Song
2026-04-19 16:34 ` [PATCH bpf-next v6 12/17] bpf: Support stack arguments for kfunc calls Yonghong Song
2026-04-19 17:08 ` sashiko-bot
2026-04-19 18:18 ` Yonghong Song
2026-04-19 16:34 ` [PATCH bpf-next v6 13/17] bpf: Reject stack arguments if tail call reachable Yonghong Song
2026-04-19 17:08 ` sashiko-bot
2026-04-19 18:20 ` Yonghong Song
2026-04-19 16:34 ` [PATCH bpf-next v6 14/17] bpf,x86: Implement JIT support for stack arguments Yonghong Song
2026-04-19 17:25 ` sashiko-bot
2026-04-19 18:55 ` Yonghong Song
2026-04-19 16:34 ` [PATCH bpf-next v6 15/17] selftests/bpf: Add tests for BPF function " Yonghong Song
2026-04-19 17:15 ` sashiko-bot
2026-04-20 5:52 ` Yonghong Song
2026-04-19 16:34 ` [PATCH bpf-next v6 16/17] selftests/bpf: Add tests for stack argument validation Yonghong Song
2026-04-19 16:34 ` [PATCH bpf-next v6 17/17] selftests/bpf: Add verifier " Yonghong Song
2026-04-19 17:21 ` sashiko-bot
2026-04-20 6:14 ` Yonghong Song
2026-04-20 15:41 ` [PATCH bpf-next v6 00/17] bpf: Support stack arguments for BPF functions and kfuncs Puranjay Mohan
2026-04-20 20:22 ` Yonghong Song
2026-04-20 20:25 ` Puranjay Mohan
2026-04-20 21:49 ` Alexei Starovoitov
2026-04-20 23:44 ` Yonghong Song
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260419163331.733278-1-yonghong.song@linux.dev \
--to=yonghong.song@linux.dev \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=jose.marchesi@oracle.com \
--cc=kernel-team@fb.com \
--cc=martin.lau@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.