From: Yonghong Song <yonghong.song@linux.dev>
To: bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
Andrii Nakryiko <andrii@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
"Jose E . Marchesi" <jose.marchesi@oracle.com>,
kernel-team@fb.com, Martin KaFai Lau <martin.lau@kernel.org>
Subject: [PATCH bpf-next v5 04/16] bpf: Prepare verifier logs for upcoming kfunc stack arguments
Date: Thu, 16 Apr 2026 20:47:19 -0700 [thread overview]
Message-ID: <20260417034719.2627762-1-yonghong.song@linux.dev> (raw)
In-Reply-To: <20260417034658.2625353-1-yonghong.song@linux.dev>
This change prepares verifier log reporting for upcoming kfunc stack
argument support.
Today verifier log code mostly assumes that an argument can be described
directly by a register number. That works for arguments passed in `R1`
to `R5`, but it does not work once kfunc arguments can also be
passed on the stack.
Introduce an internal `argno` representation such that register-passed
arguments keep using their real register numbers, while stack-passed
arguments use an encoded value above a dedicated base.
`reg_arg_name()` converts this representation into either `R%d` or
`*(R11-off)` when emitting verifier logs. If a particular `argno`
corresponds to a stack argument, print `*(R11-off)`. Otherwise,
print `R%d`. Here R11 represents the base of stack arguments.
This keeps existing logs readable for register arguments and allows the
same log sites to handle future stack arguments without open-coding
special cases.
Update selftests accordingly.
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
---
include/linux/bpf_verifier.h | 1 +
kernel/bpf/verifier.c | 649 ++++++++++--------
.../testing/selftests/bpf/prog_tests/bpf_nf.c | 22 +-
.../selftests/bpf/prog_tests/cb_refs.c | 2 +-
.../selftests/bpf/prog_tests/kfunc_call.c | 2 +-
.../selftests/bpf/prog_tests/linked_list.c | 4 +-
.../selftests/bpf/progs/cgrp_kfunc_failure.c | 14 +-
.../selftests/bpf/progs/cpumask_failure.c | 10 +-
.../testing/selftests/bpf/progs/dynptr_fail.c | 22 +-
.../selftests/bpf/progs/file_reader_fail.c | 4 +-
tools/testing/selftests/bpf/progs/irq.c | 4 +-
tools/testing/selftests/bpf/progs/iters.c | 6 +-
.../selftests/bpf/progs/iters_state_safety.c | 14 +-
.../selftests/bpf/progs/iters_testmod.c | 4 +-
.../selftests/bpf/progs/iters_testmod_seq.c | 4 +-
.../selftests/bpf/progs/map_kptr_fail.c | 2 +-
.../selftests/bpf/progs/percpu_alloc_fail.c | 4 +-
.../testing/selftests/bpf/progs/rbtree_fail.c | 6 +-
.../bpf/progs/refcounted_kptr_fail.c | 2 +-
.../testing/selftests/bpf/progs/stream_fail.c | 2 +-
.../selftests/bpf/progs/task_kfunc_failure.c | 18 +-
.../selftests/bpf/progs/task_work_fail.c | 6 +-
.../selftests/bpf/progs/test_bpf_nf_fail.c | 8 +-
.../bpf/progs/test_kfunc_dynptr_param.c | 2 +-
.../bpf/progs/test_kfunc_param_nullable.c | 2 +-
.../selftests/bpf/progs/verifier_bits_iter.c | 4 +-
.../bpf/progs/verifier_ref_tracking.c | 6 +-
.../selftests/bpf/progs/verifier_vfs_reject.c | 8 +-
.../testing/selftests/bpf/progs/wq_failures.c | 2 +-
tools/testing/selftests/bpf/verifier/calls.c | 14 +-
30 files changed, 474 insertions(+), 374 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 53e8664cb566..29a8a2605a12 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -912,6 +912,7 @@ struct bpf_verifier_env {
* e.g., in reg_type_str() to generate reg_type string
*/
char tmp_str_buf[TMP_STR_BUF_LEN];
+ char tmp_reg_arg_name_buf[32];
struct bpf_insn insn_buf[INSN_BUF_SIZE];
struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
struct bpf_scc_callchain callchain_buf;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 7a7024d94cf0..ff0c55d80311 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1751,6 +1751,55 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
return &elem->st;
}
+/*
+ * Unified argument number encoding for verifier log messages.
+ * Register args (arg_idx 0-4) use their register number (R1-R5).
+ * Stack args (arg_idx 5+) are encoded as STACK_ARGNO_BASE + arg_idx
+ * to avoid collision with register numbers. reg_arg_name() decodes
+ * this back to a human-readable string like "*(R11-8)" for logs.
+ */
+#define STACK_ARGNO_BASE 100
+
+static bool is_stack_argno(int argno)
+{
+ return argno >= STACK_ARGNO_BASE;
+}
+
+static u32 make_argno(u32 arg_idx)
+{
+ if (arg_idx < MAX_BPF_FUNC_REG_ARGS)
+ return BPF_REG_1 + arg_idx;
+ return STACK_ARGNO_BASE + arg_idx;
+}
+
+static u32 arg_idx_from_argno(int argno)
+{
+ if (is_stack_argno(argno))
+ return argno - STACK_ARGNO_BASE;
+ return argno - BPF_REG_1;
+}
+
+static int next_argno(int argno)
+{
+ return make_argno(arg_idx_from_argno(argno) + 1);
+}
+
+static const char *reg_arg_name(struct bpf_verifier_env *env, int argno)
+{
+ char *buf = env->tmp_reg_arg_name_buf;
+ int len = sizeof(env->tmp_reg_arg_name_buf);
+ u32 idx;
+
+ if (!is_stack_argno(argno)) {
+ snprintf(buf, len, "R%d", argno);
+ return buf;
+ }
+
+ idx = arg_idx_from_argno(argno);
+ snprintf(buf, len, "*(R11-%u)", (idx - MAX_BPF_FUNC_REG_ARGS + 1) * BPF_REG_SIZE);
+ return buf;
+}
+
static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
@@ -4245,7 +4294,7 @@ enum bpf_access_src {
};
static int check_stack_range_initialized(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- int regno, int off, int access_size,
+ int argno, int off, int access_size,
bool zero_size_allowed,
enum bpf_access_type type,
struct bpf_call_arg_meta *meta);
@@ -4269,7 +4318,7 @@ static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
* instead.
*/
static int check_stack_read_var_off(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- int ptr_regno, int off, int size, int dst_regno)
+ int ptr_argno, int off, int size, int dst_regno)
{
struct bpf_func_state *ptr_state = bpf_func(env, reg);
int err;
@@ -4277,7 +4326,7 @@ static int check_stack_read_var_off(struct bpf_verifier_env *env, struct bpf_reg
/* Note that we pass a NULL meta, so raw access will not be permitted.
*/
- err = check_stack_range_initialized(env, reg, ptr_regno, off, size,
+ err = check_stack_range_initialized(env, reg, ptr_argno, off, size,
false, BPF_READ, NULL);
if (err)
return err;
@@ -4299,7 +4348,7 @@ static int check_stack_read_var_off(struct bpf_verifier_env *env, struct bpf_reg
* can be -1, meaning that the read value is not going to a register.
*/
static int check_stack_read(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg, int ptr_regno, int off, int size,
+ struct bpf_reg_state *reg, int ptr_argno, int off, int size,
int dst_regno)
{
struct bpf_func_state *state = bpf_func(env, reg);
@@ -4337,7 +4386,7 @@ static int check_stack_read(struct bpf_verifier_env *env,
* than fixed offset ones. Note that dst_regno >= 0 on this
* branch.
*/
- err = check_stack_read_var_off(env, reg, ptr_regno, off, size,
+ err = check_stack_read_var_off(env, reg, ptr_argno, off, size,
dst_regno);
}
return err;
@@ -4375,7 +4424,7 @@ static int check_stack_write(struct bpf_verifier_env *env,
return err;
}
-static int check_map_access_type(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno,
+static int check_map_access_type(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 argno,
int off, int size, enum bpf_access_type type)
{
struct bpf_map *map = reg->map_ptr;
@@ -4397,7 +4446,7 @@ static int check_map_access_type(struct bpf_verifier_env *env, struct bpf_reg_st
}
/* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
-static int __check_mem_access(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int regno,
+static int __check_mem_access(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int argno,
int off, int size, u32 mem_size,
bool zero_size_allowed)
{
@@ -4418,8 +4467,8 @@ static int __check_mem_access(struct bpf_verifier_env *env, struct bpf_reg_state
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
case PTR_TO_PACKET_END:
- verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
- off, size, regno, reg->id, off, mem_size);
+ verbose(env, "invalid access to packet, off=%d size=%d, %s(id=%d,off=%d,r=%d)\n",
+ off, size, reg_arg_name(env, argno), reg->id, off, mem_size);
break;
case PTR_TO_CTX:
verbose(env, "invalid access to context, ctx_size=%d off=%d size=%d\n",
@@ -4435,7 +4484,7 @@ static int __check_mem_access(struct bpf_verifier_env *env, struct bpf_reg_state
}
/* check read/write into a memory region with possible variable offset */
-static int check_mem_region_access(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno,
+static int check_mem_region_access(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 argno,
int off, int size, u32 mem_size,
bool zero_size_allowed)
{
@@ -4455,15 +4504,15 @@ static int check_mem_region_access(struct bpf_verifier_env *env, struct bpf_reg_
(reg->smin_value == S64_MIN ||
(off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
reg->smin_value + off < 0)) {
- verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
- regno);
+ verbose(env, "%s min value is negative, either use unsigned index or do a if (index >=0) check.\n",
+ reg_arg_name(env, argno));
return -EACCES;
}
- err = __check_mem_access(env, reg, regno, reg->smin_value + off, size,
+ err = __check_mem_access(env, reg, argno, reg->smin_value + off, size,
mem_size, zero_size_allowed);
if (err) {
- verbose(env, "R%d min value is outside of the allowed memory range\n",
- regno);
+ verbose(env, "%s min value is outside of the allowed memory range\n",
+ reg_arg_name(env, argno));
return err;
}
@@ -4472,15 +4521,15 @@ static int check_mem_region_access(struct bpf_verifier_env *env, struct bpf_reg_
* If reg->umax_value + off could overflow, treat that as unbounded too.
*/
if (reg->umax_value >= BPF_MAX_VAR_OFF) {
- verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
- regno);
+ verbose(env, "%s unbounded memory access, make sure to bounds check any such access\n",
+ reg_arg_name(env, argno));
return -EACCES;
}
- err = __check_mem_access(env, reg, regno, reg->umax_value + off, size,
+ err = __check_mem_access(env, reg, argno, reg->umax_value + off, size,
mem_size, zero_size_allowed);
if (err) {
- verbose(env, "R%d max value is outside of the allowed memory range\n",
- regno);
+ verbose(env, "%s max value is outside of the allowed memory range\n",
+ reg_arg_name(env, argno));
return err;
}
@@ -4488,7 +4537,7 @@ static int check_mem_region_access(struct bpf_verifier_env *env, struct bpf_reg_
}
static int __check_ptr_off_reg(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno,
+ const struct bpf_reg_state *reg, u32 argno,
bool fixed_off_ok)
{
/* Access to this pointer-typed register or passing it to a helper
@@ -4505,14 +4554,14 @@ static int __check_ptr_off_reg(struct bpf_verifier_env *env,
}
if (reg->smin_value < 0) {
- verbose(env, "negative offset %s ptr R%d off=%lld disallowed\n",
- reg_type_str(env, reg->type), regno, reg->var_off.value);
+ verbose(env, "negative offset %s ptr %s off=%lld disallowed\n",
+ reg_type_str(env, reg->type), reg_arg_name(env, argno), reg->var_off.value);
return -EACCES;
}
if (!fixed_off_ok && reg->var_off.value != 0) {
- verbose(env, "dereference of modified %s ptr R%d off=%lld disallowed\n",
- reg_type_str(env, reg->type), regno, reg->var_off.value);
+ verbose(env, "dereference of modified %s ptr %s off=%lld disallowed\n",
+ reg_type_str(env, reg->type), reg_arg_name(env, argno), reg->var_off.value);
return -EACCES;
}
@@ -4882,17 +4931,17 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
}
}
-static int check_packet_access(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno, int off,
+static int check_packet_access(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 argno, int off,
int size, bool zero_size_allowed)
{
int err;
if (reg->range < 0) {
- verbose(env, "R%d offset is outside of the packet\n", regno);
+ verbose(env, "%s offset is outside of the packet\n", reg_arg_name(env, argno));
return -EINVAL;
}
- err = check_mem_region_access(env, reg, regno, off, size, reg->range, zero_size_allowed);
+ err = check_mem_region_access(env, reg, argno, off, size, reg->range, zero_size_allowed);
if (err)
return err;
@@ -4947,7 +4996,7 @@ static int __check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int of
return -EACCES;
}
-static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, struct bpf_reg_state *reg, u32 regno,
+static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, struct bpf_reg_state *reg, u32 argno,
int off, int access_size, enum bpf_access_type t,
struct bpf_insn_access_aux *info)
{
@@ -4960,9 +5009,9 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, struct b
int err;
if (var_off_ok)
- err = check_mem_region_access(env, reg, regno, off, access_size, U16_MAX, false);
+ err = check_mem_region_access(env, reg, argno, off, access_size, U16_MAX, false);
else
- err = __check_ptr_off_reg(env, reg, regno, fixed_off_ok);
+ err = __check_ptr_off_reg(env, reg, argno, fixed_off_ok);
if (err)
return err;
off += reg->umax_value;
@@ -4986,15 +5035,15 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
}
static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
- struct bpf_reg_state *reg, u32 regno, int off, int size,
+ struct bpf_reg_state *reg, u32 argno, int off, int size,
enum bpf_access_type t)
{
struct bpf_insn_access_aux info = {};
bool valid;
if (reg->smin_value < 0) {
- verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
- regno);
+ verbose(env, "%s min value is negative, either use unsigned index or do a if (index >=0) check.\n",
+ reg_arg_name(env, argno));
return -EACCES;
}
@@ -5022,8 +5071,8 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
return 0;
}
- verbose(env, "R%d invalid %s access off=%d size=%d\n",
- regno, reg_type_str(env, reg->type), off, size);
+ verbose(env, "%s invalid %s access off=%d size=%d\n",
+ reg_arg_name(env, argno), reg_type_str(env, reg->type), off, size);
return -EACCES;
}
@@ -5533,12 +5582,12 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
static int __check_buffer_access(struct bpf_verifier_env *env,
const char *buf_info,
const struct bpf_reg_state *reg,
- int regno, int off, int size)
+ int argno, int off, int size)
{
if (off < 0) {
verbose(env,
- "R%d invalid %s buffer access: off=%d, size=%d\n",
- regno, buf_info, off, size);
+ "%s invalid %s buffer access: off=%d, size=%d\n",
+ reg_arg_name(env, argno), buf_info, off, size);
return -EACCES;
}
if (!tnum_is_const(reg->var_off)) {
@@ -5546,8 +5595,8 @@ static int __check_buffer_access(struct bpf_verifier_env *env,
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env,
- "R%d invalid variable buffer offset: off=%d, var_off=%s\n",
- regno, off, tn_buf);
+ "%s invalid variable buffer offset: off=%d, var_off=%s\n",
+ reg_arg_name(env, argno), off, tn_buf);
return -EACCES;
}
@@ -5556,11 +5605,11 @@ static int __check_buffer_access(struct bpf_verifier_env *env,
static int check_tp_buffer_access(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
- int regno, int off, int size)
+ int argno, int off, int size)
{
int err;
- err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
+ err = __check_buffer_access(env, "tracepoint", reg, argno, off, size);
if (err)
return err;
@@ -5572,14 +5621,14 @@ static int check_tp_buffer_access(struct bpf_verifier_env *env,
static int check_buffer_access(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
- int regno, int off, int size,
+ int argno, int off, int size,
bool zero_size_allowed,
u32 *max_access)
{
const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
int err;
- err = __check_buffer_access(env, buf_info, reg, regno, off, size);
+ err = __check_buffer_access(env, buf_info, reg, argno, off, size);
if (err)
return err;
@@ -5952,7 +6001,7 @@ static bool type_is_trusted_or_null(struct bpf_verifier_env *env,
static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, struct bpf_reg_state *reg,
- int regno, int off, int size,
+ int argno, int off, int size,
enum bpf_access_type atype,
int value_regno)
{
@@ -5981,8 +6030,8 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env,
- "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
- regno, tname, off, tn_buf);
+ "%s is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
+ reg_arg_name(env, argno), tname, off, tn_buf);
return -EACCES;
}
@@ -5990,22 +6039,22 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
if (off < 0) {
verbose(env,
- "R%d is ptr_%s invalid negative access: off=%d\n",
- regno, tname, off);
+ "%s is ptr_%s invalid negative access: off=%d\n",
+ reg_arg_name(env, argno), tname, off);
return -EACCES;
}
if (reg->type & MEM_USER) {
verbose(env,
- "R%d is ptr_%s access user memory: off=%d\n",
- regno, tname, off);
+ "%s is ptr_%s access user memory: off=%d\n",
+ reg_arg_name(env, argno), tname, off);
return -EACCES;
}
if (reg->type & MEM_PERCPU) {
verbose(env,
- "R%d is ptr_%s access percpu memory: off=%d\n",
- regno, tname, off);
+ "%s is ptr_%s access percpu memory: off=%d\n",
+ reg_arg_name(env, argno), tname, off);
return -EACCES;
}
@@ -6108,7 +6157,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
static int check_ptr_to_map_access(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, struct bpf_reg_state *reg,
- int regno, int off, int size,
+ int argno, int off, int size,
enum bpf_access_type atype,
int value_regno)
{
@@ -6142,8 +6191,8 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
}
if (off < 0) {
- verbose(env, "R%d is %s invalid negative access: off=%d\n",
- regno, tname, off);
+ verbose(env, "%s is %s invalid negative access: off=%d\n",
+ reg_arg_name(env, argno), tname, off);
return -EACCES;
}
@@ -6201,7 +6250,7 @@ static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
*/
static int check_stack_access_within_bounds(
struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- int regno, int off, int access_size,
+ int argno, int off, int access_size,
enum bpf_access_type type)
{
struct bpf_func_state *state = bpf_func(env, reg);
@@ -6220,8 +6269,8 @@ static int check_stack_access_within_bounds(
} else {
if (reg->smax_value >= BPF_MAX_VAR_OFF ||
reg->smin_value <= -BPF_MAX_VAR_OFF) {
- verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
- err_extra, regno);
+ verbose(env, "invalid unbounded variable-offset%s stack %s\n",
+ err_extra, reg_arg_name(env, argno));
return -EACCES;
}
min_off = reg->smin_value + off;
@@ -6239,14 +6288,14 @@ static int check_stack_access_within_bounds(
if (err) {
if (tnum_is_const(reg->var_off)) {
- verbose(env, "invalid%s stack R%d off=%lld size=%d\n",
- err_extra, regno, min_off, access_size);
+ verbose(env, "invalid%s stack %s off=%lld size=%d\n",
+ err_extra, reg_arg_name(env, argno), min_off, access_size);
} else {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "invalid variable-offset%s stack R%d var_off=%s off=%d size=%d\n",
- err_extra, regno, tn_buf, off, access_size);
+ verbose(env, "invalid variable-offset%s stack %s var_off=%s off=%d size=%d\n",
+ err_extra, reg_arg_name(env, argno), tn_buf, off, access_size);
}
return err;
}
@@ -6291,7 +6340,7 @@ static void add_scalar_to_reg(struct bpf_reg_state *dst_reg, s64 val)
* if t==write && value_regno==-1, some unknown value is stored into memory
* if t==read && value_regno==-1, don't care what we read from memory
*/
-static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct bpf_reg_state *reg, u32 regno,
+static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct bpf_reg_state *reg, u32 argno,
int off, int bpf_size, enum bpf_access_type t,
int value_regno, bool strict_alignment_once, bool is_ldsx)
{
@@ -6308,11 +6357,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
if (reg->type == PTR_TO_MAP_KEY) {
if (t == BPF_WRITE) {
- verbose(env, "write to change key R%d not allowed\n", regno);
+ verbose(env, "write to change key %s not allowed\n",
+ reg_arg_name(env, argno));
return -EACCES;
}
- err = check_mem_region_access(env, reg, regno, off, size,
+ err = check_mem_region_access(env, reg, argno, off, size,
reg->map_ptr->key_size, false);
if (err)
return err;
@@ -6326,10 +6376,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
verbose(env, "R%d leaks addr into map\n", value_regno);
return -EACCES;
}
- err = check_map_access_type(env, reg, regno, off, size, t);
+ err = check_map_access_type(env, reg, argno, off, size, t);
if (err)
return err;
- err = check_map_access(env, reg, regno, off, size, false, ACCESS_DIRECT);
+ err = check_map_access(env, reg, argno, off, size, false, ACCESS_DIRECT);
if (err)
return err;
if (tnum_is_const(reg->var_off))
@@ -6376,14 +6426,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
bool rdonly_untrusted = rdonly_mem && (reg->type & PTR_UNTRUSTED);
if (type_may_be_null(reg->type)) {
- verbose(env, "R%d invalid mem access '%s'\n", regno,
+ verbose(env, "%s invalid mem access '%s'\n", reg_arg_name(env, argno),
reg_type_str(env, reg->type));
return -EACCES;
}
if (t == BPF_WRITE && rdonly_mem) {
- verbose(env, "R%d cannot write into %s\n",
- regno, reg_type_str(env, reg->type));
+ verbose(env, "%s cannot write into %s\n",
+ reg_arg_name(env, argno), reg_type_str(env, reg->type));
return -EACCES;
}
@@ -6398,7 +6448,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
* instructions, hence no need to check bounds in that case.
*/
if (!rdonly_untrusted)
- err = check_mem_region_access(env, reg, regno, off, size,
+ err = check_mem_region_access(env, reg, argno, off, size,
reg->mem_size, false);
if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
mark_reg_unknown(env, regs, value_regno);
@@ -6416,7 +6466,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
return -EACCES;
}
- err = check_ctx_access(env, insn_idx, reg, regno, off, size, t, &info);
+ err = check_ctx_access(env, insn_idx, reg, argno, off, size, t, &info);
if (!err && t == BPF_READ && value_regno >= 0) {
/* ctx access returns either a scalar, or a
* PTR_TO_PACKET[_META,_END]. In the latter
@@ -6453,12 +6503,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
} else if (reg->type == PTR_TO_STACK) {
/* Basic bounds checks. */
- err = check_stack_access_within_bounds(env, reg, regno, off, size, t);
+ err = check_stack_access_within_bounds(env, reg, argno, off, size, t);
if (err)
return err;
if (t == BPF_READ)
- err = check_stack_read(env, reg, regno, off, size,
+ err = check_stack_read(env, reg, argno, off, size,
value_regno);
else
err = check_stack_write(env, reg, off, size,
@@ -6474,7 +6524,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
value_regno);
return -EACCES;
}
- err = check_packet_access(env, reg, regno, off, size, false);
+ err = check_packet_access(env, reg, argno, off, size, false);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_FLOW_KEYS) {
@@ -6490,23 +6540,23 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
mark_reg_unknown(env, regs, value_regno);
} else if (type_is_sk_pointer(reg->type)) {
if (t == BPF_WRITE) {
- verbose(env, "R%d cannot write into %s\n",
- regno, reg_type_str(env, reg->type));
+ verbose(env, "%s cannot write into %s\n",
+ reg_arg_name(env, argno), reg_type_str(env, reg->type));
return -EACCES;
}
- err = check_sock_access(env, insn_idx, reg, regno, off, size, t);
+ err = check_sock_access(env, insn_idx, reg, argno, off, size, t);
if (!err && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_TP_BUFFER) {
- err = check_tp_buffer_access(env, reg, regno, off, size);
+ err = check_tp_buffer_access(env, reg, argno, off, size);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
!type_may_be_null(reg->type)) {
- err = check_ptr_to_btf_access(env, regs, reg, regno, off, size, t,
+ err = check_ptr_to_btf_access(env, regs, reg, argno, off, size, t,
value_regno);
} else if (reg->type == CONST_PTR_TO_MAP) {
- err = check_ptr_to_map_access(env, regs, reg, regno, off, size, t,
+ err = check_ptr_to_map_access(env, regs, reg, argno, off, size, t,
value_regno);
} else if (base_type(reg->type) == PTR_TO_BUF &&
!type_may_be_null(reg->type)) {
@@ -6515,8 +6565,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
if (rdonly_mem) {
if (t == BPF_WRITE) {
- verbose(env, "R%d cannot write into %s\n",
- regno, reg_type_str(env, reg->type));
+ verbose(env, "%s cannot write into %s\n",
+ reg_arg_name(env, argno), reg_type_str(env, reg->type));
return -EACCES;
}
max_access = &env->prog->aux->max_rdonly_access;
@@ -6524,7 +6574,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
max_access = &env->prog->aux->max_rdwr_access;
}
- err = check_buffer_access(env, reg, regno, off, size, false,
+ err = check_buffer_access(env, reg, argno, off, size, false,
max_access);
if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
@@ -6533,7 +6583,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
if (t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else {
- verbose(env, "R%d invalid mem access '%s'\n", regno,
+ verbose(env, "%s invalid mem access '%s'\n", reg_arg_name(env, argno),
reg_type_str(env, reg->type));
return -EACCES;
}
@@ -6787,7 +6837,7 @@ static int check_atomic(struct bpf_verifier_env *env, struct bpf_insn *insn)
* read offsets are marked as read.
*/
static int check_stack_range_initialized(
- struct bpf_verifier_env *env, struct bpf_reg_state *reg, int regno, int off,
+ struct bpf_verifier_env *env, struct bpf_reg_state *reg, int argno, int off,
int access_size, bool zero_size_allowed,
enum bpf_access_type type, struct bpf_call_arg_meta *meta)
{
@@ -6812,7 +6862,7 @@ static int check_stack_range_initialized(
return -EACCES;
}
- err = check_stack_access_within_bounds(env, reg, regno, off, access_size, type);
+ err = check_stack_access_within_bounds(env, reg, argno, off, access_size, type);
if (err)
return err;
@@ -6829,8 +6879,8 @@ static int check_stack_range_initialized(
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
- regno, tn_buf);
+ verbose(env, "%s variable offset stack access prohibited for !root, var_off=%s\n",
+ reg_arg_name(env, argno), tn_buf);
return -EACCES;
}
/* Only initialized buffer on stack is allowed to be accessed
@@ -6873,7 +6923,7 @@ static int check_stack_range_initialized(
}
}
meta->access_size = access_size;
- meta->regno = regno;
+ meta->regno = argno;
return 0;
}
@@ -6913,17 +6963,17 @@ static int check_stack_range_initialized(
if (*stype == STACK_POISON) {
if (allow_poison)
goto mark;
- verbose(env, "reading from stack R%d off %d+%d size %d, slot poisoned by dead code elimination\n",
- regno, min_off, i - min_off, access_size);
+ verbose(env, "reading from stack %s off %d+%d size %d, slot poisoned by dead code elimination\n",
+ reg_arg_name(env, argno), min_off, i - min_off, access_size);
} else if (tnum_is_const(reg->var_off)) {
- verbose(env, "invalid read from stack R%d off %d+%d size %d\n",
- regno, min_off, i - min_off, access_size);
+ verbose(env, "invalid read from stack %s off %d+%d size %d\n",
+ reg_arg_name(env, argno), min_off, i - min_off, access_size);
} else {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "invalid read from stack R%d var_off %s+%d size %d\n",
- regno, tn_buf, i - min_off, access_size);
+ verbose(env, "invalid read from stack %s var_off %s+%d size %d\n",
+ reg_arg_name(env, argno), tn_buf, i - min_off, access_size);
}
return -EACCES;
mark:
@@ -6932,7 +6982,7 @@ static int check_stack_range_initialized(
return 0;
}
-static int check_helper_mem_access(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int regno,
+static int check_helper_mem_access(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int argno,
int access_size, enum bpf_access_type access_type,
bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
@@ -6943,37 +6993,37 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, struct bpf_reg_
switch (base_type(reg->type)) {
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
- return check_packet_access(env, reg, regno, 0, access_size,
+ return check_packet_access(env, reg, argno, 0, access_size,
zero_size_allowed);
case PTR_TO_MAP_KEY:
if (access_type == BPF_WRITE) {
- verbose(env, "R%d cannot write into %s\n", regno,
- reg_type_str(env, reg->type));
+ verbose(env, "%s cannot write into %s\n",
+ reg_arg_name(env, argno), reg_type_str(env, reg->type));
return -EACCES;
}
- return check_mem_region_access(env, reg, regno, 0, access_size,
+ return check_mem_region_access(env, reg, argno, 0, access_size,
reg->map_ptr->key_size, false);
case PTR_TO_MAP_VALUE:
- if (check_map_access_type(env, reg, regno, 0, access_size, access_type))
+ if (check_map_access_type(env, reg, argno, 0, access_size, access_type))
return -EACCES;
- return check_map_access(env, reg, regno, 0, access_size,
+ return check_map_access(env, reg, argno, 0, access_size,
zero_size_allowed, ACCESS_HELPER);
case PTR_TO_MEM:
if (type_is_rdonly_mem(reg->type)) {
if (access_type == BPF_WRITE) {
- verbose(env, "R%d cannot write into %s\n", regno,
- reg_type_str(env, reg->type));
+ verbose(env, "%s cannot write into %s\n",
+ reg_arg_name(env, argno), reg_type_str(env, reg->type));
return -EACCES;
}
}
- return check_mem_region_access(env, reg, regno, 0,
+ return check_mem_region_access(env, reg, argno, 0,
access_size, reg->mem_size,
zero_size_allowed);
case PTR_TO_BUF:
if (type_is_rdonly_mem(reg->type)) {
if (access_type == BPF_WRITE) {
- verbose(env, "R%d cannot write into %s\n", regno,
- reg_type_str(env, reg->type));
+ verbose(env, "%s cannot write into %s\n",
+ reg_arg_name(env, argno), reg_type_str(env, reg->type));
return -EACCES;
}
@@ -6981,21 +7031,21 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, struct bpf_reg_
} else {
max_access = &env->prog->aux->max_rdwr_access;
}
- return check_buffer_access(env, reg, regno, 0,
+ return check_buffer_access(env, reg, argno, 0,
access_size, zero_size_allowed,
max_access);
case PTR_TO_STACK:
return check_stack_range_initialized(
env, reg,
- regno, 0, access_size,
+ argno, 0, access_size,
zero_size_allowed, access_type, meta);
case PTR_TO_BTF_ID:
- return check_ptr_to_btf_access(env, regs, reg, regno, 0,
+ return check_ptr_to_btf_access(env, regs, reg, argno, 0,
access_size, BPF_READ, -1);
case PTR_TO_CTX:
/* Only permit reading or writing syscall context using helper calls. */
if (is_var_ctx_off_allowed(env->prog)) {
- int err = check_mem_region_access(env, reg, regno, 0, access_size, U16_MAX,
+ int err = check_mem_region_access(env, reg, argno, 0, access_size, U16_MAX,
zero_size_allowed);
if (err)
return err;
@@ -7010,7 +7060,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, struct bpf_reg_
bpf_register_is_null(reg))
return 0;
- verbose(env, "R%d type=%s ", regno,
+ verbose(env, "%s type=%s ", reg_arg_name(env, argno),
reg_type_str(env, reg->type));
verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
return -EACCES;
@@ -7025,12 +7075,12 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, struct bpf_reg_
*/
static int check_mem_size_reg(struct bpf_verifier_env *env,
struct bpf_reg_state *mem_reg,
- struct bpf_reg_state *size_reg, u32 mem_regno,
+ struct bpf_reg_state *size_reg, u32 mem_argno,
enum bpf_access_type access_type,
bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
- int size_regno = mem_regno + 1;
+ int size_argno = next_argno(mem_argno);
int err;
/* This is used to refine r0 return value bounds for helpers
@@ -7052,31 +7102,31 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
meta = NULL;
if (size_reg->smin_value < 0) {
- verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
- size_regno);
+ verbose(env, "%s min value is negative, either use unsigned or 'var &= const'\n",
+ reg_arg_name(env, size_argno));
return -EACCES;
}
if (size_reg->umin_value == 0 && !zero_size_allowed) {
- verbose(env, "R%d invalid zero-sized read: u64=[%lld,%lld]\n",
- size_regno, size_reg->umin_value, size_reg->umax_value);
+ verbose(env, "%s invalid zero-sized read: u64=[%lld,%lld]\n",
+ reg_arg_name(env, size_argno), size_reg->umin_value, size_reg->umax_value);
return -EACCES;
}
if (size_reg->umax_value >= BPF_MAX_VAR_SIZ) {
- verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
- size_regno);
+ verbose(env, "%s unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
+ reg_arg_name(env, size_argno));
return -EACCES;
}
- err = check_helper_mem_access(env, mem_reg, mem_regno, size_reg->umax_value,
+ err = check_helper_mem_access(env, mem_reg, mem_argno, size_reg->umax_value,
access_type, zero_size_allowed, meta);
- if (!err)
- err = mark_chain_precision(env, size_regno);
+ if (!err && !is_stack_argno(size_argno))
+ err = mark_chain_precision(env, size_argno);
return err;
}
static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- u32 regno, u32 mem_size)
+ u32 argno, u32 mem_size)
{
bool may_be_null = type_may_be_null(reg->type);
struct bpf_reg_state saved_reg;
@@ -7096,8 +7146,8 @@ static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg
int size = base_type(reg->type) == PTR_TO_STACK ? -(int)mem_size : mem_size;
- err = check_helper_mem_access(env, reg, regno, size, BPF_READ, true, NULL);
- err = err ?: check_helper_mem_access(env, reg, regno, size, BPF_WRITE, true, NULL);
+ err = check_helper_mem_access(env, reg, argno, size, BPF_READ, true, NULL);
+ err = err ?: check_helper_mem_access(env, reg, argno, size, BPF_WRITE, true, NULL);
if (may_be_null)
*reg = saved_reg;
@@ -7106,14 +7156,15 @@ static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg
}
static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *mem_reg,
- struct bpf_reg_state *size_reg, u32 mem_regno)
+ struct bpf_reg_state *size_reg, u32 mem_argno)
{
bool may_be_null = type_may_be_null(mem_reg->type);
struct bpf_reg_state saved_reg;
struct bpf_call_arg_meta meta;
+ u32 argno = make_argno(mem_argno);
int err;
- WARN_ON_ONCE(mem_regno > BPF_REG_4);
+ WARN_ON_ONCE(mem_argno > BPF_REG_3);
memset(&meta, 0, sizeof(meta));
@@ -7122,8 +7173,8 @@ static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg
mark_ptr_not_null_reg(mem_reg);
}
- err = check_mem_size_reg(env, mem_reg, size_reg, mem_regno, BPF_READ, true, &meta);
- err = err ?: check_mem_size_reg(env, mem_reg, size_reg, mem_regno, BPF_WRITE, true, &meta);
+ err = check_mem_size_reg(env, mem_reg, size_reg, argno, BPF_READ, true, &meta);
+ err = err ?: check_mem_size_reg(env, mem_reg, size_reg, argno, BPF_WRITE, true, &meta);
if (may_be_null)
*mem_reg = saved_reg;
@@ -7159,7 +7210,7 @@ enum {
* env->cur_state->active_locks remembers which map value element or allocated
* object got locked and clears it after bpf_spin_unlock.
*/
-static int process_spin_lock(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int regno, int flags)
+static int process_spin_lock(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int argno, int flags)
{
bool is_lock = flags & PROCESS_SPIN_LOCK, is_res_lock = flags & PROCESS_RES_LOCK;
const char *lock_str = is_res_lock ? "bpf_res_spin" : "bpf_spin";
@@ -7175,8 +7226,8 @@ static int process_spin_lock(struct bpf_verifier_env *env, struct bpf_reg_state
if (!is_const) {
verbose(env,
- "R%d doesn't have constant offset. %s_lock has to be at the constant offset\n",
- regno, lock_str);
+ "%s doesn't have constant offset. %s_lock has to be at the constant offset\n",
+ reg_arg_name(env, argno), lock_str);
return -EINVAL;
}
if (reg->type == PTR_TO_MAP_VALUE) {
@@ -7275,7 +7326,7 @@ static int process_spin_lock(struct bpf_verifier_env *env, struct bpf_reg_state
}
/* Check if @regno is a pointer to a specific field in a map value */
-static int check_map_field_pointer(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno,
+static int check_map_field_pointer(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 argno,
enum btf_field_type field_type,
struct bpf_map_desc *map_desc)
{
@@ -7287,8 +7338,8 @@ static int check_map_field_pointer(struct bpf_verifier_env *env, struct bpf_reg_
if (!is_const) {
verbose(env,
- "R%d doesn't have constant offset. %s has to be at the constant offset\n",
- regno, struct_name);
+ "%s doesn't have constant offset. %s has to be at the constant offset\n",
+ reg_arg_name(env, argno), struct_name);
return -EINVAL;
}
if (!map->btf) {
@@ -7328,26 +7379,26 @@ static int check_map_field_pointer(struct bpf_verifier_env *env, struct bpf_reg_
return 0;
}
-static int process_timer_func(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int regno,
+static int process_timer_func(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int argno,
struct bpf_map_desc *map)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
verbose(env, "bpf_timer cannot be used for PREEMPT_RT.\n");
return -EOPNOTSUPP;
}
- return check_map_field_pointer(env, reg, regno, BPF_TIMER, map);
+ return check_map_field_pointer(env, reg, argno, BPF_TIMER, map);
}
-static int process_timer_helper(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int regno,
+static int process_timer_helper(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int argno,
struct bpf_call_arg_meta *meta)
{
- return process_timer_func(env, reg, regno, &meta->map);
+ return process_timer_func(env, reg, argno, &meta->map);
}
-static int process_timer_kfunc(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int regno,
+static int process_timer_kfunc(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int argno,
struct bpf_kfunc_call_arg_meta *meta)
{
- return process_timer_func(env, reg, regno, &meta->map);
+ return process_timer_func(env, reg, argno, &meta->map);
}
static int process_kptr_func(struct bpf_verifier_env *env, int regno,
@@ -7423,15 +7474,15 @@ static int process_kptr_func(struct bpf_verifier_env *env, int regno,
* Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument
* type, and declare it as 'const struct bpf_dynptr *' in their prototype.
*/
-static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int regno, int insn_idx,
+static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int argno, int insn_idx,
enum bpf_arg_type arg_type, int clone_ref_obj_id)
{
int err;
if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) {
verbose(env,
- "arg#%d expected pointer to stack or const struct bpf_dynptr\n",
- regno - 1);
+ "%s expected pointer to stack or const struct bpf_dynptr\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
@@ -7468,7 +7519,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_stat
/* we write BPF_DW bits (8 bytes) at a time */
for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
- err = check_mem_access(env, insn_idx, reg, regno,
+ err = check_mem_access(env, insn_idx, reg, argno,
i, BPF_DW, BPF_WRITE, -1, false, false);
if (err)
return err;
@@ -7483,17 +7534,16 @@ static int process_dynptr_func(struct bpf_verifier_env *env, struct bpf_reg_stat
}
if (!is_dynptr_reg_valid_init(env, reg)) {
- verbose(env,
- "Expected an initialized dynptr as arg #%d\n",
- regno - 1);
+ verbose(env, "Expected an initialized dynptr as %s\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
/* Fold modifiers (in this case, MEM_RDONLY) when checking expected type */
if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) {
- verbose(env,
- "Expected a dynptr of type %s as arg #%d\n",
- dynptr_type_str(arg_to_dynptr_type(arg_type)), regno - 1);
+ verbose(env, "Expected a dynptr of type %s as %s\n",
+ dynptr_type_str(arg_to_dynptr_type(arg_type)),
+ reg_arg_name(env, argno));
return -EINVAL;
}
@@ -7538,14 +7588,16 @@ static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg_idx,
return btf_param_match_suffix(meta->btf, arg, "__iter");
}
-static int process_iter_arg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int regno, int insn_idx,
+static int process_iter_arg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int argno, int insn_idx,
struct bpf_kfunc_call_arg_meta *meta)
{
const struct btf_type *t;
+ u32 arg_idx = arg_idx_from_argno(argno);
int spi, err, i, nr_slots, btf_id;
if (reg->type != PTR_TO_STACK) {
- verbose(env, "arg#%d expected pointer to an iterator on stack\n", regno - 1);
+ verbose(env, "%s expected pointer to an iterator on stack\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
@@ -7555,9 +7607,10 @@ static int process_iter_arg(struct bpf_verifier_env *env, struct bpf_reg_state *
* to any kfunc, if arg has "__iter" suffix, we need to be a bit more
* conservative here.
*/
- btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, regno - 1);
+ btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, arg_idx);
if (btf_id < 0) {
- verbose(env, "expected valid iter pointer as arg #%d\n", regno - 1);
+ verbose(env, "expected valid iter pointer as %s\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
t = btf_type_by_id(meta->btf, btf_id);
@@ -7566,13 +7619,13 @@ static int process_iter_arg(struct bpf_verifier_env *env, struct bpf_reg_state *
if (is_iter_new_kfunc(meta)) {
/* bpf_iter_<type>_new() expects pointer to uninit iter state */
if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) {
- verbose(env, "expected uninitialized iter_%s as arg #%d\n",
- iter_type_str(meta->btf, btf_id), regno - 1);
+ verbose(env, "expected uninitialized iter_%s as %s\n",
+ iter_type_str(meta->btf, btf_id), reg_arg_name(env, argno));
return -EINVAL;
}
for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) {
- err = check_mem_access(env, insn_idx, reg, regno,
+ err = check_mem_access(env, insn_idx, reg, argno,
i, BPF_DW, BPF_WRITE, -1, false, false);
if (err)
return err;
@@ -7590,8 +7643,8 @@ static int process_iter_arg(struct bpf_verifier_env *env, struct bpf_reg_state *
case 0:
break;
case -EINVAL:
- verbose(env, "expected an initialized iter_%s as arg #%d\n",
- iter_type_str(meta->btf, btf_id), regno - 1);
+ verbose(env, "expected an initialized iter_%s as %s\n",
+ iter_type_str(meta->btf, btf_id), reg_arg_name(env, argno));
return err;
case -EPROTO:
verbose(env, "expected an RCU CS when using %s\n", meta->func_name);
@@ -8011,7 +8064,7 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
[ARG_PTR_TO_DYNPTR] = &dynptr_types,
};
-static int check_reg_type(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno,
+static int check_reg_type(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 argno,
enum bpf_arg_type arg_type,
const u32 *arg_btf_id,
struct bpf_call_arg_meta *meta)
@@ -8046,7 +8099,8 @@ static int check_reg_type(struct bpf_verifier_env *env, struct bpf_reg_state *re
type &= ~DYNPTR_TYPE_FLAG_MASK;
/* Local kptr types are allowed as the source argument of bpf_kptr_xchg */
- if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type) && regno == BPF_REG_2) {
+ if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type) &&
+ !is_stack_argno(argno) && argno == BPF_REG_2) {
type &= ~MEM_ALLOC;
type &= ~MEM_PERCPU;
}
@@ -8060,7 +8114,7 @@ static int check_reg_type(struct bpf_verifier_env *env, struct bpf_reg_state *re
goto found;
}
- verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
+ verbose(env, "%s type=%s expected=", reg_arg_name(env, argno), reg_type_str(env, reg->type));
for (j = 0; j + 1 < i; j++)
verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
@@ -8073,9 +8127,9 @@ static int check_reg_type(struct bpf_verifier_env *env, struct bpf_reg_state *re
if (compatible == &mem_types) {
if (!(arg_type & MEM_RDONLY)) {
verbose(env,
- "%s() may write into memory pointed by R%d type=%s\n",
+ "%s() may write into memory pointed by %s type=%s\n",
func_id_name(meta->func_id),
- regno, reg_type_str(env, reg->type));
+ reg_arg_name(env, argno), reg_type_str(env, reg->type));
return -EACCES;
}
return 0;
@@ -8098,7 +8152,8 @@ static int check_reg_type(struct bpf_verifier_env *env, struct bpf_reg_state *re
if (type_may_be_null(reg->type) &&
(!type_may_be_null(arg_type) || arg_type_is_release(arg_type))) {
- verbose(env, "Possibly NULL pointer passed to helper arg%d\n", regno);
+ verbose(env, "Possibly NULL pointer passed to helper %s\n",
+ reg_arg_name(env, argno));
return -EACCES;
}
@@ -8111,25 +8166,26 @@ static int check_reg_type(struct bpf_verifier_env *env, struct bpf_reg_state *re
}
if (meta->func_id == BPF_FUNC_kptr_xchg) {
- if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
+ if (map_kptr_match_type(env, meta->kptr_field, reg, argno))
return -EACCES;
} else {
if (arg_btf_id == BPF_PTR_POISON) {
verbose(env, "verifier internal error:");
- verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n",
- regno);
+ verbose(env, "%s has non-overwritten BPF_PTR_POISON type\n",
+ reg_arg_name(env, argno));
return -EACCES;
}
- err = __check_ptr_off_reg(env, reg, regno, true);
+ err = __check_ptr_off_reg(env, reg, argno, true);
if (err)
return err;
if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id,
reg->var_off.value, btf_vmlinux, *arg_btf_id,
strict_type_match)) {
- verbose(env, "R%d is of type %s but %s is expected\n",
- regno, btf_type_name(reg->btf, reg->btf_id),
+ verbose(env, "%s is of type %s but %s is expected\n",
+ reg_arg_name(env, argno),
+ btf_type_name(reg->btf, reg->btf_id),
btf_type_name(btf_vmlinux, *arg_btf_id));
return -EACCES;
}
@@ -8146,8 +8202,9 @@ static int check_reg_type(struct bpf_verifier_env *env, struct bpf_reg_state *re
return -EFAULT;
}
/* Check if local kptr in src arg matches kptr in dst arg */
- if (meta->func_id == BPF_FUNC_kptr_xchg && regno == BPF_REG_2) {
- if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
+ if (meta->func_id == BPF_FUNC_kptr_xchg &&
+ !is_stack_argno(argno) && argno == BPF_REG_2) {
+ if (map_kptr_match_type(env, meta->kptr_field, reg, argno))
return -EACCES;
}
break;
@@ -8181,7 +8238,7 @@ reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
}
static int check_func_arg_reg_off(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno,
+ const struct bpf_reg_state *reg, int argno,
enum bpf_arg_type arg_type)
{
u32 type = reg->type;
@@ -8207,8 +8264,8 @@ static int check_func_arg_reg_off(struct bpf_verifier_env *env,
* to give the user a better error message.
*/
if (!tnum_is_const(reg->var_off) || reg->var_off.value != 0) {
- verbose(env, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n",
- regno);
+ verbose(env, "%s must have zero offset when passed to release func or trusted arg to kfunc\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
}
@@ -8244,7 +8301,7 @@ static int check_func_arg_reg_off(struct bpf_verifier_env *env,
* cases. var_off always must be 0 for PTR_TO_BTF_ID, hence we
* still need to do checks instead of returning.
*/
- return __check_ptr_off_reg(env, reg, regno, true);
+ return __check_ptr_off_reg(env, reg, argno, true);
case PTR_TO_CTX:
/*
* Allow fixed and variable offsets for syscall context, but
@@ -8256,7 +8313,7 @@ static int check_func_arg_reg_off(struct bpf_verifier_env *env,
return 0;
fallthrough;
default:
- return __check_ptr_off_reg(env, reg, regno, false);
+ return __check_ptr_off_reg(env, reg, argno, false);
}
}
@@ -8326,8 +8383,8 @@ static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env,
return state->stack[spi].spilled_ptr.dynptr.type;
}
-static int check_reg_const_str(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg, u32 regno)
+static int check_arg_const_str(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg, u32 argno)
{
struct bpf_map *map = reg->map_ptr;
int err;
@@ -8339,17 +8396,18 @@ static int check_reg_const_str(struct bpf_verifier_env *env,
return -EINVAL;
if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY) {
- verbose(env, "R%d points to insn_array map which cannot be used as const string\n", regno);
+ verbose(env, "%s points to insn_array map which cannot be used as const string\n",
+ reg_arg_name(env, argno));
return -EACCES;
}
if (!bpf_map_is_rdonly(map)) {
- verbose(env, "R%d does not point to a readonly map'\n", regno);
+ verbose(env, "%s does not point to a readonly map'\n", reg_arg_name(env, argno));
return -EACCES;
}
if (!tnum_is_const(reg->var_off)) {
- verbose(env, "R%d is not a constant address'\n", regno);
+ verbose(env, "%s is not a constant address'\n", reg_arg_name(env, argno));
return -EACCES;
}
@@ -8358,7 +8416,7 @@ static int check_reg_const_str(struct bpf_verifier_env *env,
return -EACCES;
}
- err = check_map_access(env, reg, regno, 0,
+ err = check_map_access(env, reg, argno, 0,
map->value_size - reg->var_off.value, false,
ACCESS_HELPER);
if (err)
@@ -8697,7 +8755,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
break;
case ARG_PTR_TO_CONST_STR:
{
- err = check_reg_const_str(env, reg, regno);
+ err = check_arg_const_str(env, reg, regno);
if (err)
return err;
break;
@@ -9286,13 +9344,14 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
* verifier sees.
*/
for (i = 0; i < sub->arg_cnt; i++) {
+ u32 argno = make_argno(i);
u32 regno = i + 1;
struct bpf_reg_state *reg = &regs[regno];
struct bpf_subprog_arg_info *arg = &sub->args[i];
if (arg->arg_type == ARG_ANYTHING) {
if (reg->type != SCALAR_VALUE) {
- bpf_log(log, "R%d is not a scalar\n", regno);
+ bpf_log(log, "%s is not a scalar\n", reg_arg_name(env, argno));
return -EINVAL;
}
} else if (arg->arg_type & PTR_UNTRUSTED) {
@@ -9302,24 +9361,26 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
* invalid memory access.
*/
} else if (arg->arg_type == ARG_PTR_TO_CTX) {
- ret = check_func_arg_reg_off(env, reg, regno, ARG_PTR_TO_CTX);
+ ret = check_func_arg_reg_off(env, reg, argno, ARG_PTR_TO_CTX);
if (ret < 0)
return ret;
/* If function expects ctx type in BTF check that caller
* is passing PTR_TO_CTX.
*/
if (reg->type != PTR_TO_CTX) {
- bpf_log(log, "arg#%d expects pointer to ctx\n", i);
+ bpf_log(log, "%s expects pointer to ctx\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
} else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) {
- ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE);
+ ret = check_func_arg_reg_off(env, reg, argno, ARG_DONTCARE);
if (ret < 0)
return ret;
- if (check_mem_reg(env, reg, regno, arg->mem_size))
+ if (check_mem_reg(env, reg, argno, arg->mem_size))
return -EINVAL;
if (!(arg->arg_type & PTR_MAYBE_NULL) && (reg->type & PTR_MAYBE_NULL)) {
- bpf_log(log, "arg#%d is expected to be non-NULL\n", i);
+ bpf_log(log, "%s is expected to be non-NULL\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
} else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) {
@@ -9331,15 +9392,16 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
* run-time debug nightmare.
*/
if (reg->type != PTR_TO_ARENA && reg->type != SCALAR_VALUE) {
- bpf_log(log, "R%d is not a pointer to arena or scalar.\n", regno);
+ bpf_log(log, "%s is not a pointer to arena or scalar.\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
} else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) {
- ret = check_func_arg_reg_off(env, reg, regno, ARG_PTR_TO_DYNPTR);
+ ret = check_func_arg_reg_off(env, reg, argno, ARG_PTR_TO_DYNPTR);
if (ret)
return ret;
- ret = process_dynptr_func(env, reg, regno, -1, arg->arg_type, 0);
+ ret = process_dynptr_func(env, reg, argno, -1, arg->arg_type, 0);
if (ret)
return ret;
} else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
@@ -9350,12 +9412,13 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
continue;
memset(&meta, 0, sizeof(meta)); /* leave func_id as zero */
- err = check_reg_type(env, reg, regno, arg->arg_type, &arg->btf_id, &meta);
- err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type);
+ err = check_reg_type(env, reg, argno, arg->arg_type, &arg->btf_id, &meta);
+ err = err ?: check_func_arg_reg_off(env, reg, argno, arg->arg_type);
if (err)
return err;
} else {
- verifier_bug(env, "unrecognized arg#%d type %d", i, arg->arg_type);
+ verifier_bug(env, "unrecognized %s type %d",
+ reg_arg_name(env, argno), arg->arg_type);
return -EFAULT;
}
}
@@ -11398,8 +11461,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
if (!btf_type_is_struct(ref_t)) {
- verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
- meta->func_name, argno, btf_type_str(ref_t), ref_tname);
+ verbose(env, "kernel function %s %s pointer type %s %s is not supported\n",
+ meta->func_name, reg_arg_name(env, make_argno(argno)),
+ btf_type_str(ref_t), ref_tname);
return -EINVAL;
}
return KF_ARG_PTR_TO_BTF_ID;
@@ -11415,8 +11479,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
*/
if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) &&
(arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
- verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
- argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
+ verbose(env, "%s pointer type %s %s must point to %sscalar, or struct with scalar\n",
+ reg_arg_name(env, make_argno(argno)),
+ btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
return -EINVAL;
}
return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM;
@@ -11485,15 +11550,16 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
*/
taking_projection = btf_is_projection_of(ref_tname, reg_ref_tname);
if (!taking_projection && !struct_same) {
- verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
- meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1,
+ verbose(env, "kernel function %s %s expected pointer to %s %s but %s has a pointer to %s %s\n",
+ meta->func_name, reg_arg_name(env, make_argno(argno)),
+ btf_type_str(ref_t), ref_tname, reg_arg_name(env, make_argno(argno)),
btf_type_str(reg_ref_t), reg_ref_tname);
return -EINVAL;
}
return 0;
}
-static int process_irq_flag(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int regno,
+static int process_irq_flag(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int argno,
struct bpf_kfunc_call_arg_meta *meta)
{
int err, kfunc_class = IRQ_NATIVE_KFUNC;
@@ -11516,11 +11582,13 @@ static int process_irq_flag(struct bpf_verifier_env *env, struct bpf_reg_state *
if (irq_save) {
if (!is_irq_flag_reg_valid_uninit(env, reg)) {
- verbose(env, "expected uninitialized irq flag as arg#%d\n", regno - 1);
+ verbose(env, "expected uninitialized irq flag as %s\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
- err = check_mem_access(env, env->insn_idx, reg, regno, 0, BPF_DW, BPF_WRITE, -1, false, false);
+ err = check_mem_access(env, env->insn_idx, reg, argno, 0, BPF_DW,
+ BPF_WRITE, -1, false, false);
if (err)
return err;
@@ -11530,7 +11598,8 @@ static int process_irq_flag(struct bpf_verifier_env *env, struct bpf_reg_state *
} else {
err = is_irq_flag_reg_valid_init(env, reg);
if (err) {
- verbose(env, "expected an initialized irq flag as arg#%d\n", regno - 1);
+ verbose(env, "expected an initialized irq flag as %s\n",
+ reg_arg_name(env, argno));
return err;
}
@@ -11821,7 +11890,7 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
static int
__process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg, u32 regno,
+ struct bpf_reg_state *reg, u32 argno,
struct bpf_kfunc_call_arg_meta *meta,
enum btf_field_type head_field_type,
struct btf_field **head_field)
@@ -11842,8 +11911,8 @@ __process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
head_type_name = btf_field_type_name(head_field_type);
if (!tnum_is_const(reg->var_off)) {
verbose(env,
- "R%d doesn't have constant offset. %s has to be at the constant offset\n",
- regno, head_type_name);
+ "%s doesn't have constant offset. %s has to be at the constant offset\n",
+ reg_arg_name(env, argno), head_type_name);
return -EINVAL;
}
@@ -11871,24 +11940,24 @@ __process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
}
static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg, u32 regno,
+ struct bpf_reg_state *reg, u32 argno,
struct bpf_kfunc_call_arg_meta *meta)
{
- return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD,
+ return __process_kf_arg_ptr_to_graph_root(env, reg, argno, meta, BPF_LIST_HEAD,
&meta->arg_list_head.field);
}
static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg, u32 regno,
+ struct bpf_reg_state *reg, u32 argno,
struct bpf_kfunc_call_arg_meta *meta)
{
- return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT,
+ return __process_kf_arg_ptr_to_graph_root(env, reg, argno, meta, BPF_RB_ROOT,
&meta->arg_rbtree_root.field);
}
static int
__process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg, u32 regno,
+ struct bpf_reg_state *reg, u32 argno,
struct bpf_kfunc_call_arg_meta *meta,
enum btf_field_type head_field_type,
enum btf_field_type node_field_type,
@@ -11910,8 +11979,8 @@ __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
node_type_name = btf_field_type_name(node_field_type);
if (!tnum_is_const(reg->var_off)) {
verbose(env,
- "R%d doesn't have constant offset. %s has to be at the constant offset\n",
- regno, node_type_name);
+ "%s doesn't have constant offset. %s has to be at the constant offset\n",
+ reg_arg_name(env, argno), node_type_name);
return -EINVAL;
}
@@ -11952,19 +12021,19 @@ __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
}
static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg, u32 regno,
+ struct bpf_reg_state *reg, u32 argno,
struct bpf_kfunc_call_arg_meta *meta)
{
- return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
+ return __process_kf_arg_ptr_to_graph_node(env, reg, argno, meta,
BPF_LIST_HEAD, BPF_LIST_NODE,
&meta->arg_list_head.field);
}
static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg, u32 regno,
+ struct bpf_reg_state *reg, u32 argno,
struct bpf_kfunc_call_arg_meta *meta)
{
- return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
+ return __process_kf_arg_ptr_to_graph_node(env, reg, argno, meta,
BPF_RB_ROOT, BPF_RB_NODE,
&meta->arg_rbtree_root.field);
}
@@ -12016,6 +12085,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1];
const struct btf_type *t, *ref_t, *resolve_ret;
enum bpf_arg_type arg_type = ARG_DONTCARE;
+ u32 argno = make_argno(i);
u32 regno = i + 1, ref_id, type_size;
bool is_ret_buf_sz = false;
int kf_arg_type;
@@ -12038,7 +12108,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
if (btf_type_is_scalar(t)) {
if (reg->type != SCALAR_VALUE) {
- verbose(env, "R%d is not a scalar\n", regno);
+ verbose(env, "%s is not a scalar\n", reg_arg_name(env, argno));
return -EINVAL;
}
@@ -12048,7 +12118,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return -EFAULT;
}
if (!tnum_is_const(reg->var_off)) {
- verbose(env, "R%d must be a known constant\n", regno);
+ verbose(env, "%s must be a known constant\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
ret = mark_chain_precision(env, regno);
@@ -12070,7 +12141,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
if (!tnum_is_const(reg->var_off)) {
- verbose(env, "R%d is not a const\n", regno);
+ verbose(env, "%s is not a const\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
@@ -12083,20 +12155,22 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
if (!btf_type_is_ptr(t)) {
- verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t));
+ verbose(env, "Unrecognized %s type %s\n",
+ reg_arg_name(env, argno), btf_type_str(t));
return -EINVAL;
}
if ((bpf_register_is_null(reg) || type_may_be_null(reg->type)) &&
!is_kfunc_arg_nullable(meta->btf, &args[i])) {
- verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
+ verbose(env, "Possibly NULL pointer passed to trusted %s\n",
+ reg_arg_name(env, argno));
return -EACCES;
}
if (reg->ref_obj_id) {
if (is_kfunc_release(meta) && meta->ref_obj_id) {
- verifier_bug(env, "more than one arg with ref_obj_id R%d %u %u",
- regno, reg->ref_obj_id,
+ verifier_bug(env, "more than one arg with ref_obj_id %s %u %u",
+ reg_arg_name(env, argno), reg->ref_obj_id,
meta->ref_obj_id);
return -EFAULT;
}
@@ -12117,7 +12191,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
continue;
case KF_ARG_PTR_TO_MAP:
if (!reg->map_ptr) {
- verbose(env, "pointer in R%d isn't map pointer\n", regno);
+ verbose(env, "pointer in %s isn't map pointer\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
if (meta->map.ptr && (reg->map_ptr->record->wq_off >= 0 ||
@@ -12155,11 +12230,13 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
case KF_ARG_PTR_TO_BTF_ID:
if (!is_trusted_reg(reg)) {
if (!is_kfunc_rcu(meta)) {
- verbose(env, "R%d must be referenced or trusted\n", regno);
+ verbose(env, "%s must be referenced or trusted\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
if (!is_rcu_reg(reg)) {
- verbose(env, "R%d must be a rcu pointer\n", regno);
+ verbose(env, "%s must be a rcu pointer\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
}
@@ -12191,15 +12268,15 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
if (is_kfunc_release(meta) && reg->ref_obj_id)
arg_type |= OBJ_RELEASE;
- ret = check_func_arg_reg_off(env, reg, regno, arg_type);
+ ret = check_func_arg_reg_off(env, reg, argno, arg_type);
if (ret < 0)
return ret;
switch (kf_arg_type) {
case KF_ARG_PTR_TO_CTX:
if (reg->type != PTR_TO_CTX) {
- verbose(env, "arg#%d expected pointer to ctx, but got %s\n",
- i, reg_type_str(env, reg->type));
+ verbose(env, "%s expected pointer to ctx, but got %s\n",
+ reg_arg_name(env, argno), reg_type_str(env, reg->type));
return -EINVAL;
}
@@ -12213,16 +12290,19 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
case KF_ARG_PTR_TO_ALLOC_BTF_ID:
if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) {
if (!is_bpf_obj_drop_kfunc(meta->func_id)) {
- verbose(env, "arg#%d expected for bpf_obj_drop()\n", i);
+ verbose(env, "%s expected for bpf_obj_drop()\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
} else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC | MEM_PERCPU)) {
if (!is_bpf_percpu_obj_drop_kfunc(meta->func_id)) {
- verbose(env, "arg#%d expected for bpf_percpu_obj_drop()\n", i);
+ verbose(env, "%s expected for bpf_percpu_obj_drop()\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
} else {
- verbose(env, "arg#%d expected pointer to allocated object\n", i);
+ verbose(env, "%s expected pointer to allocated object\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
if (!reg->ref_obj_id) {
@@ -12273,7 +12353,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
}
- ret = process_dynptr_func(env, reg, regno, insn_idx, dynptr_arg_type, clone_ref_obj_id);
+ ret = process_dynptr_func(env, reg, argno, insn_idx,
+ dynptr_arg_type, clone_ref_obj_id);
if (ret < 0)
return ret;
@@ -12298,55 +12379,59 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return -EINVAL;
}
}
- ret = process_iter_arg(env, reg, regno, insn_idx, meta);
+ ret = process_iter_arg(env, reg, argno, insn_idx, meta);
if (ret < 0)
return ret;
break;
case KF_ARG_PTR_TO_LIST_HEAD:
if (reg->type != PTR_TO_MAP_VALUE &&
reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
- verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
+ verbose(env, "%s expected pointer to map value or allocated object\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
verbose(env, "allocated object must be referenced\n");
return -EINVAL;
}
- ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta);
+ ret = process_kf_arg_ptr_to_list_head(env, reg, argno, meta);
if (ret < 0)
return ret;
break;
case KF_ARG_PTR_TO_RB_ROOT:
if (reg->type != PTR_TO_MAP_VALUE &&
reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
- verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
+ verbose(env, "%s expected pointer to map value or allocated object\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
verbose(env, "allocated object must be referenced\n");
return -EINVAL;
}
- ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta);
+ ret = process_kf_arg_ptr_to_rbtree_root(env, reg, argno, meta);
if (ret < 0)
return ret;
break;
case KF_ARG_PTR_TO_LIST_NODE:
if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
- verbose(env, "arg#%d expected pointer to allocated object\n", i);
+ verbose(env, "%s expected pointer to allocated object\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
if (!reg->ref_obj_id) {
verbose(env, "allocated object must be referenced\n");
return -EINVAL;
}
- ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta);
+ ret = process_kf_arg_ptr_to_list_node(env, reg, argno, meta);
if (ret < 0)
return ret;
break;
case KF_ARG_PTR_TO_RB_NODE:
if (is_bpf_rbtree_add_kfunc(meta->func_id)) {
if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
- verbose(env, "arg#%d expected pointer to allocated object\n", i);
+ verbose(env, "%s expected pointer to allocated object\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
if (!reg->ref_obj_id) {
@@ -12364,7 +12449,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
}
- ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta);
+ ret = process_kf_arg_ptr_to_rbtree_node(env, reg, argno, meta);
if (ret < 0)
return ret;
break;
@@ -12379,7 +12464,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
if ((base_type(reg->type) != PTR_TO_BTF_ID ||
(bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) &&
!reg2btf_ids[base_type(reg->type)]) {
- verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type));
+ verbose(env, "%s is %s ", reg_arg_name(env, argno),
+ reg_type_str(env, reg->type));
verbose(env, "expected %s or socket\n",
reg_type_str(env, base_type(reg->type) |
(type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
@@ -12392,11 +12478,12 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
case KF_ARG_PTR_TO_MEM:
resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
if (IS_ERR(resolve_ret)) {
- verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
- i, btf_type_str(ref_t), ref_tname, PTR_ERR(resolve_ret));
+ verbose(env, "%s reference type('%s %s') size cannot be determined: %ld\n",
+ reg_arg_name(env, argno), btf_type_str(ref_t),
+ ref_tname, PTR_ERR(resolve_ret));
return -EINVAL;
}
- ret = check_mem_reg(env, reg, regno, type_size);
+ ret = check_mem_reg(env, reg, argno, type_size);
if (ret < 0)
return ret;
break;
@@ -12408,9 +12495,11 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
const struct btf_param *size_arg = &args[i + 1];
if (!bpf_register_is_null(buff_reg) || !is_kfunc_arg_nullable(meta->btf, buff_arg)) {
- ret = check_kfunc_mem_size_reg(env, buff_reg, size_reg, regno);
+ ret = check_kfunc_mem_size_reg(env, buff_reg, size_reg, i);
if (ret < 0) {
- verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1);
+ verbose(env, "%s and ", reg_arg_name(env, argno));
+ verbose(env, "%s memory, len pair leads to invalid memory access\n",
+ reg_arg_name(env, next_argno(argno)));
return ret;
}
}
@@ -12421,7 +12510,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return -EFAULT;
}
if (!tnum_is_const(size_reg->var_off)) {
- verbose(env, "R%d must be a known constant\n", regno + 1);
+ verbose(env, "%s must be a known constant\n",
+ reg_arg_name(env, next_argno(argno)));
return -EINVAL;
}
meta->arg_constant.found = true;
@@ -12434,14 +12524,16 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
case KF_ARG_PTR_TO_CALLBACK:
if (reg->type != PTR_TO_FUNC) {
- verbose(env, "arg%d expected pointer to func\n", i);
+ verbose(env, "%s expected pointer to func\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
meta->subprogno = reg->subprogno;
break;
case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
if (!type_is_ptr_alloc_obj(reg->type)) {
- verbose(env, "arg#%d is neither owning or non-owning ref\n", i);
+ verbose(env, "%s is neither owning or non-owning ref\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
if (!type_is_non_owning_ref(reg->type))
@@ -12454,7 +12546,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
if (rec->refcount_off < 0) {
- verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i);
+ verbose(env, "%s doesn't point to a type with bpf_refcount field\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
@@ -12463,46 +12556,51 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
break;
case KF_ARG_PTR_TO_CONST_STR:
if (reg->type != PTR_TO_MAP_VALUE) {
- verbose(env, "arg#%d doesn't point to a const string\n", i);
+ verbose(env, "%s doesn't point to a const string\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
- ret = check_reg_const_str(env, reg, regno);
+ ret = check_arg_const_str(env, reg, argno);
if (ret)
return ret;
break;
case KF_ARG_PTR_TO_WORKQUEUE:
if (reg->type != PTR_TO_MAP_VALUE) {
- verbose(env, "arg#%d doesn't point to a map value\n", i);
+ verbose(env, "%s doesn't point to a map value\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
- ret = check_map_field_pointer(env, reg, regno, BPF_WORKQUEUE, &meta->map);
+ ret = check_map_field_pointer(env, reg, argno, BPF_WORKQUEUE, &meta->map);
if (ret < 0)
return ret;
break;
case KF_ARG_PTR_TO_TIMER:
if (reg->type != PTR_TO_MAP_VALUE) {
- verbose(env, "arg#%d doesn't point to a map value\n", i);
+ verbose(env, "%s doesn't point to a map value\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
- ret = process_timer_kfunc(env, reg, regno, meta);
+ ret = process_timer_kfunc(env, reg, argno, meta);
if (ret < 0)
return ret;
break;
case KF_ARG_PTR_TO_TASK_WORK:
if (reg->type != PTR_TO_MAP_VALUE) {
- verbose(env, "arg#%d doesn't point to a map value\n", i);
+ verbose(env, "%s doesn't point to a map value\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
- ret = check_map_field_pointer(env, reg, regno, BPF_TASK_WORK, &meta->map);
+ ret = check_map_field_pointer(env, reg, argno, BPF_TASK_WORK, &meta->map);
if (ret < 0)
return ret;
break;
case KF_ARG_PTR_TO_IRQ_FLAG:
if (reg->type != PTR_TO_STACK) {
- verbose(env, "arg#%d doesn't point to an irq flag on stack\n", i);
+ verbose(env, "%s doesn't point to an irq flag on stack\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
- ret = process_irq_flag(env, reg, regno, meta);
+ ret = process_irq_flag(env, reg, argno, meta);
if (ret < 0)
return ret;
break;
@@ -12511,7 +12609,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
int flags = PROCESS_RES_LOCK;
if (reg->type != PTR_TO_MAP_VALUE && reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
- verbose(env, "arg#%d doesn't point to map value or allocated object\n", i);
+ verbose(env, "%s doesn't point to map value or allocated object\n",
+ reg_arg_name(env, argno));
return -EINVAL;
}
@@ -12523,7 +12622,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
if (meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave] ||
meta->func_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore])
flags |= PROCESS_LOCK_IRQ;
- ret = process_spin_lock(env, reg, regno, flags);
+ ret = process_spin_lock(env, reg, argno, flags);
if (ret < 0)
return ret;
break;
@@ -18737,7 +18836,7 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
mark_reg_unknown(env, regs, i);
} else {
verifier_bug(env, "unhandled arg#%d type %d",
- i - BPF_REG_1, arg->arg_type);
+ i - BPF_REG_1 + 1, arg->arg_type);
ret = -EFAULT;
goto out;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
index 215878ea04de..b33dba4b126e 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
@@ -11,18 +11,18 @@ struct {
const char *prog_name;
const char *err_msg;
} test_bpf_nf_fail_tests[] = {
- { "alloc_release", "kernel function bpf_ct_release args#0 expected pointer to STRUCT nf_conn but" },
- { "insert_insert", "kernel function bpf_ct_insert_entry args#0 expected pointer to STRUCT nf_conn___init but" },
- { "lookup_insert", "kernel function bpf_ct_insert_entry args#0 expected pointer to STRUCT nf_conn___init but" },
- { "set_timeout_after_insert", "kernel function bpf_ct_set_timeout args#0 expected pointer to STRUCT nf_conn___init but" },
- { "set_status_after_insert", "kernel function bpf_ct_set_status args#0 expected pointer to STRUCT nf_conn___init but" },
- { "change_timeout_after_alloc", "kernel function bpf_ct_change_timeout args#0 expected pointer to STRUCT nf_conn but" },
- { "change_status_after_alloc", "kernel function bpf_ct_change_status args#0 expected pointer to STRUCT nf_conn but" },
+ { "alloc_release", "kernel function bpf_ct_release R1 expected pointer to STRUCT nf_conn but" },
+ { "insert_insert", "kernel function bpf_ct_insert_entry R1 expected pointer to STRUCT nf_conn___init but" },
+ { "lookup_insert", "kernel function bpf_ct_insert_entry R1 expected pointer to STRUCT nf_conn___init but" },
+ { "set_timeout_after_insert", "kernel function bpf_ct_set_timeout R1 expected pointer to STRUCT nf_conn___init but" },
+ { "set_status_after_insert", "kernel function bpf_ct_set_status R1 expected pointer to STRUCT nf_conn___init but" },
+ { "change_timeout_after_alloc", "kernel function bpf_ct_change_timeout R1 expected pointer to STRUCT nf_conn but" },
+ { "change_status_after_alloc", "kernel function bpf_ct_change_status R1 expected pointer to STRUCT nf_conn but" },
{ "write_not_allowlisted_field", "no write support to nf_conn at off" },
- { "lookup_null_bpf_tuple", "Possibly NULL pointer passed to trusted arg1" },
- { "lookup_null_bpf_opts", "Possibly NULL pointer passed to trusted arg3" },
- { "xdp_lookup_null_bpf_tuple", "Possibly NULL pointer passed to trusted arg1" },
- { "xdp_lookup_null_bpf_opts", "Possibly NULL pointer passed to trusted arg3" },
+ { "lookup_null_bpf_tuple", "Possibly NULL pointer passed to trusted R2" },
+ { "lookup_null_bpf_opts", "Possibly NULL pointer passed to trusted R4" },
+ { "xdp_lookup_null_bpf_tuple", "Possibly NULL pointer passed to trusted R2" },
+ { "xdp_lookup_null_bpf_opts", "Possibly NULL pointer passed to trusted R4" },
};
enum {
diff --git a/tools/testing/selftests/bpf/prog_tests/cb_refs.c b/tools/testing/selftests/bpf/prog_tests/cb_refs.c
index c40df623a8f7..6300b67a3a84 100644
--- a/tools/testing/selftests/bpf/prog_tests/cb_refs.c
+++ b/tools/testing/selftests/bpf/prog_tests/cb_refs.c
@@ -12,7 +12,7 @@ struct {
const char *err_msg;
} cb_refs_tests[] = {
{ "underflow_prog", "must point to scalar, or struct with scalar" },
- { "leak_prog", "Possibly NULL pointer passed to helper arg2" },
+ { "leak_prog", "Possibly NULL pointer passed to helper R2" },
{ "nested_cb", "Unreleased reference id=4 alloc_insn=2" }, /* alloc_insn=2{4,5} */
{ "non_cb_transfer_ref", "Unreleased reference id=4 alloc_insn=1" }, /* alloc_insn=1{1,2} */
};
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
index 62f3fb79f5d1..3df07680f9e0 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -68,7 +68,7 @@ static struct kfunc_test_params kfunc_tests[] = {
TC_FAIL(kfunc_call_test_get_mem_fail_oob, 0, "min value is outside of the allowed memory range"),
TC_FAIL(kfunc_call_test_get_mem_fail_not_const, 0, "is not a const"),
TC_FAIL(kfunc_call_test_mem_acquire_fail, 0, "acquire kernel function does not return PTR_TO_BTF_ID"),
- TC_FAIL(kfunc_call_test_pointer_arg_type_mismatch, 0, "arg#0 expected pointer to ctx, but got scalar"),
+ TC_FAIL(kfunc_call_test_pointer_arg_type_mismatch, 0, "R1 expected pointer to ctx, but got scalar"),
/* success cases */
TC_TEST(kfunc_call_test1, 12),
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
index 6f25b5f39a79..dbff099860ba 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -81,8 +81,8 @@ static struct {
{ "direct_write_node", "direct access to bpf_list_node is disallowed" },
{ "use_after_unlock_push_front", "invalid mem access 'scalar'" },
{ "use_after_unlock_push_back", "invalid mem access 'scalar'" },
- { "double_push_front", "arg#1 expected pointer to allocated object" },
- { "double_push_back", "arg#1 expected pointer to allocated object" },
+ { "double_push_front", "R2 expected pointer to allocated object" },
+ { "double_push_back", "R2 expected pointer to allocated object" },
{ "no_node_value_type", "bpf_list_node not found at offset=0" },
{ "incorrect_value_type",
"operation on bpf_list_head expects arg#1 bpf_list_node at offset=48 in struct foo, "
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
index 9fe9c4a4e8f6..a875ba8e5007 100644
--- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
@@ -29,7 +29,7 @@ static struct __cgrps_kfunc_map_value *insert_lookup_cgrp(struct cgroup *cgrp)
}
SEC("tp_btf/cgroup_mkdir")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
@@ -48,7 +48,7 @@ int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path
}
SEC("tp_btf/cgroup_mkdir")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(cgrp_kfunc_acquire_no_null_check, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
@@ -64,7 +64,7 @@ int BPF_PROG(cgrp_kfunc_acquire_no_null_check, struct cgroup *cgrp, const char *
}
SEC("tp_btf/cgroup_mkdir")
-__failure __msg("arg#0 pointer type STRUCT cgroup must point")
+__failure __msg("R1 pointer type STRUCT cgroup must point")
int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired, *stack_cgrp = (struct cgroup *)&path;
@@ -106,7 +106,7 @@ int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char
}
SEC("tp_btf/cgroup_mkdir")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired;
@@ -175,7 +175,7 @@ int BPF_PROG(cgrp_kfunc_rcu_get_release, struct cgroup *cgrp, const char *path)
}
SEC("tp_btf/cgroup_mkdir")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path)
{
struct __cgrps_kfunc_map_value *v;
@@ -191,7 +191,7 @@ int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path
}
SEC("tp_btf/cgroup_mkdir")
-__failure __msg("arg#0 pointer type STRUCT cgroup must point")
+__failure __msg("R1 pointer type STRUCT cgroup must point")
int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
{
struct cgroup *acquired = (struct cgroup *)&path;
@@ -203,7 +203,7 @@ int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
}
SEC("tp_btf/cgroup_mkdir")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
{
struct __cgrps_kfunc_map_value local, *v;
diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c
index 61c32e91e8c3..4c45346fe6f7 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_failure.c
+++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c
@@ -45,7 +45,7 @@ int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)
}
SEC("tp_btf/task_newtask")
-__failure __msg("NULL pointer passed to trusted arg0")
+__failure __msg("NULL pointer passed to trusted R1")
int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
@@ -73,7 +73,7 @@ int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_fla
}
SEC("tp_btf/task_newtask")
-__failure __msg("bpf_cpumask_set_cpu args#1 expected pointer to STRUCT bpf_cpumask")
+__failure __msg("bpf_cpumask_set_cpu R2 expected pointer to STRUCT bpf_cpumask")
int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)
{
/* Can't set the CPU of a non-struct bpf_cpumask. */
@@ -107,7 +107,7 @@ int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_
}
SEC("tp_btf/task_newtask")
-__failure __msg("NULL pointer passed to trusted arg0")
+__failure __msg("NULL pointer passed to trusted R1")
int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags)
{
/* NULL passed to kfunc. */
@@ -151,7 +151,7 @@ int BPF_PROG(test_global_mask_out_of_rcu, struct task_struct *task, u64 clone_fl
}
SEC("tp_btf/task_newtask")
-__failure __msg("NULL pointer passed to trusted arg1")
+__failure __msg("NULL pointer passed to trusted R2")
int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *local, *prev;
@@ -179,7 +179,7 @@ int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone
}
SEC("tp_btf/task_newtask")
-__failure __msg("Possibly NULL pointer passed to helper arg2")
+__failure __msg("Possibly NULL pointer passed to helper R2")
int BPF_PROG(test_global_mask_rcu_no_null_check, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *prev, *curr;
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index b62773ce5219..dbd97add5a5a 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -149,7 +149,7 @@ int ringbuf_release_uninit_dynptr(void *ctx)
/* A dynptr can't be used after it has been invalidated */
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #2")
+__failure __msg("Expected an initialized dynptr as R3")
int use_after_invalid(void *ctx)
{
struct bpf_dynptr ptr;
@@ -448,7 +448,7 @@ int invalid_helper2(void *ctx)
/* A bpf_dynptr is invalidated if it's been written into */
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #0")
+__failure __msg("Expected an initialized dynptr as R1")
int invalid_write1(void *ctx)
{
struct bpf_dynptr ptr;
@@ -1642,7 +1642,7 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
/* bpf_dynptr_adjust can only be called on initialized dynptrs */
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #0")
+__failure __msg("Expected an initialized dynptr as R1")
int dynptr_adjust_invalid(void *ctx)
{
struct bpf_dynptr ptr = {};
@@ -1655,7 +1655,7 @@ int dynptr_adjust_invalid(void *ctx)
/* bpf_dynptr_is_null can only be called on initialized dynptrs */
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #0")
+__failure __msg("Expected an initialized dynptr as R1")
int dynptr_is_null_invalid(void *ctx)
{
struct bpf_dynptr ptr = {};
@@ -1668,7 +1668,7 @@ int dynptr_is_null_invalid(void *ctx)
/* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #0")
+__failure __msg("Expected an initialized dynptr as R1")
int dynptr_is_rdonly_invalid(void *ctx)
{
struct bpf_dynptr ptr = {};
@@ -1681,7 +1681,7 @@ int dynptr_is_rdonly_invalid(void *ctx)
/* bpf_dynptr_size can only be called on initialized dynptrs */
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #0")
+__failure __msg("Expected an initialized dynptr as R1")
int dynptr_size_invalid(void *ctx)
{
struct bpf_dynptr ptr = {};
@@ -1694,7 +1694,7 @@ int dynptr_size_invalid(void *ctx)
/* Only initialized dynptrs can be cloned */
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #0")
+__failure __msg("Expected an initialized dynptr as R1")
int clone_invalid1(void *ctx)
{
struct bpf_dynptr ptr1 = {};
@@ -1728,7 +1728,7 @@ int clone_invalid2(struct xdp_md *xdp)
/* Invalidating a dynptr should invalidate its clones */
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #2")
+__failure __msg("Expected an initialized dynptr as R3")
int clone_invalidate1(void *ctx)
{
struct bpf_dynptr clone;
@@ -1749,7 +1749,7 @@ int clone_invalidate1(void *ctx)
/* Invalidating a dynptr should invalidate its parent */
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #2")
+__failure __msg("Expected an initialized dynptr as R3")
int clone_invalidate2(void *ctx)
{
struct bpf_dynptr ptr;
@@ -1770,7 +1770,7 @@ int clone_invalidate2(void *ctx)
/* Invalidating a dynptr should invalidate its siblings */
SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #2")
+__failure __msg("Expected an initialized dynptr as R3")
int clone_invalidate3(void *ctx)
{
struct bpf_dynptr ptr;
@@ -1981,7 +1981,7 @@ __noinline long global_call_bpf_dynptr(const struct bpf_dynptr *dynptr)
}
SEC("?raw_tp")
-__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr")
+__failure __msg("R1 expected pointer to stack or const struct bpf_dynptr")
int test_dynptr_reg_type(void *ctx)
{
struct task_struct *current = NULL;
diff --git a/tools/testing/selftests/bpf/progs/file_reader_fail.c b/tools/testing/selftests/bpf/progs/file_reader_fail.c
index 32fe28ed2439..0739620dea8a 100644
--- a/tools/testing/selftests/bpf/progs/file_reader_fail.c
+++ b/tools/testing/selftests/bpf/progs/file_reader_fail.c
@@ -30,7 +30,7 @@ int on_nanosleep_unreleased_ref(void *ctx)
SEC("xdp")
__failure
-__msg("Expected a dynptr of type file as arg #0")
+__msg("Expected a dynptr of type file as R1")
int xdp_wrong_dynptr_type(struct xdp_md *xdp)
{
struct bpf_dynptr dynptr;
@@ -42,7 +42,7 @@ int xdp_wrong_dynptr_type(struct xdp_md *xdp)
SEC("xdp")
__failure
-__msg("Expected an initialized dynptr as arg #0")
+__msg("Expected an initialized dynptr as R1")
int xdp_no_dynptr_type(struct xdp_md *xdp)
{
struct bpf_dynptr dynptr;
diff --git a/tools/testing/selftests/bpf/progs/irq.c b/tools/testing/selftests/bpf/progs/irq.c
index e11e82d98904..a4a007866a33 100644
--- a/tools/testing/selftests/bpf/progs/irq.c
+++ b/tools/testing/selftests/bpf/progs/irq.c
@@ -15,7 +15,7 @@ struct bpf_res_spin_lock lockA __hidden SEC(".data.A");
struct bpf_res_spin_lock lockB __hidden SEC(".data.B");
SEC("?tc")
-__failure __msg("arg#0 doesn't point to an irq flag on stack")
+__failure __msg("R1 doesn't point to an irq flag on stack")
int irq_save_bad_arg(struct __sk_buff *ctx)
{
bpf_local_irq_save(&global_flags);
@@ -23,7 +23,7 @@ int irq_save_bad_arg(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("arg#0 doesn't point to an irq flag on stack")
+__failure __msg("R1 doesn't point to an irq flag on stack")
int irq_restore_bad_arg(struct __sk_buff *ctx)
{
bpf_local_irq_restore(&global_flags);
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index 86b74e3579d9..0fa70b133d93 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -1605,7 +1605,7 @@ int iter_subprog_check_stacksafe(const void *ctx)
struct bpf_iter_num global_it;
SEC("raw_tp")
-__failure __msg("arg#0 expected pointer to an iterator on stack")
+__failure __msg("R1 expected pointer to an iterator on stack")
int iter_new_bad_arg(const void *ctx)
{
bpf_iter_num_new(&global_it, 0, 1);
@@ -1613,7 +1613,7 @@ int iter_new_bad_arg(const void *ctx)
}
SEC("raw_tp")
-__failure __msg("arg#0 expected pointer to an iterator on stack")
+__failure __msg("R1 expected pointer to an iterator on stack")
int iter_next_bad_arg(const void *ctx)
{
bpf_iter_num_next(&global_it);
@@ -1621,7 +1621,7 @@ int iter_next_bad_arg(const void *ctx)
}
SEC("raw_tp")
-__failure __msg("arg#0 expected pointer to an iterator on stack")
+__failure __msg("R1 expected pointer to an iterator on stack")
int iter_destroy_bad_arg(const void *ctx)
{
bpf_iter_num_destroy(&global_it);
diff --git a/tools/testing/selftests/bpf/progs/iters_state_safety.c b/tools/testing/selftests/bpf/progs/iters_state_safety.c
index d273b46dfc7c..af8f9ec1ea98 100644
--- a/tools/testing/selftests/bpf/progs/iters_state_safety.c
+++ b/tools/testing/selftests/bpf/progs/iters_state_safety.c
@@ -73,7 +73,7 @@ int create_and_forget_to_destroy_fail(void *ctx)
}
SEC("?raw_tp")
-__failure __msg("expected an initialized iter_num as arg #0")
+__failure __msg("expected an initialized iter_num as R1")
int destroy_without_creating_fail(void *ctx)
{
/* init with zeros to stop verifier complaining about uninit stack */
@@ -91,7 +91,7 @@ int destroy_without_creating_fail(void *ctx)
}
SEC("?raw_tp")
-__failure __msg("expected an initialized iter_num as arg #0")
+__failure __msg("expected an initialized iter_num as R1")
int compromise_iter_w_direct_write_fail(void *ctx)
{
struct bpf_iter_num iter;
@@ -143,7 +143,7 @@ int compromise_iter_w_direct_write_and_skip_destroy_fail(void *ctx)
}
SEC("?raw_tp")
-__failure __msg("expected an initialized iter_num as arg #0")
+__failure __msg("expected an initialized iter_num as R1")
int compromise_iter_w_helper_write_fail(void *ctx)
{
struct bpf_iter_num iter;
@@ -230,7 +230,7 @@ int valid_stack_reuse(void *ctx)
}
SEC("?raw_tp")
-__failure __msg("expected uninitialized iter_num as arg #0")
+__failure __msg("expected uninitialized iter_num as R1")
int double_create_fail(void *ctx)
{
struct bpf_iter_num iter;
@@ -258,7 +258,7 @@ int double_create_fail(void *ctx)
}
SEC("?raw_tp")
-__failure __msg("expected an initialized iter_num as arg #0")
+__failure __msg("expected an initialized iter_num as R1")
int double_destroy_fail(void *ctx)
{
struct bpf_iter_num iter;
@@ -284,7 +284,7 @@ int double_destroy_fail(void *ctx)
}
SEC("?raw_tp")
-__failure __msg("expected an initialized iter_num as arg #0")
+__failure __msg("expected an initialized iter_num as R1")
int next_without_new_fail(void *ctx)
{
struct bpf_iter_num iter;
@@ -305,7 +305,7 @@ int next_without_new_fail(void *ctx)
}
SEC("?raw_tp")
-__failure __msg("expected an initialized iter_num as arg #0")
+__failure __msg("expected an initialized iter_num as R1")
int next_after_destroy_fail(void *ctx)
{
struct bpf_iter_num iter;
diff --git a/tools/testing/selftests/bpf/progs/iters_testmod.c b/tools/testing/selftests/bpf/progs/iters_testmod.c
index 5379e9960ffd..76012dbbdb41 100644
--- a/tools/testing/selftests/bpf/progs/iters_testmod.c
+++ b/tools/testing/selftests/bpf/progs/iters_testmod.c
@@ -29,7 +29,7 @@ int iter_next_trusted(const void *ctx)
}
SEC("raw_tp/sys_enter")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int iter_next_trusted_or_null(const void *ctx)
{
struct task_struct *cur_task = bpf_get_current_task_btf();
@@ -67,7 +67,7 @@ int iter_next_rcu(const void *ctx)
}
SEC("raw_tp/sys_enter")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int iter_next_rcu_or_null(const void *ctx)
{
struct task_struct *cur_task = bpf_get_current_task_btf();
diff --git a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
index 83791348bed5..9b760dac333e 100644
--- a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
+++ b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
@@ -79,7 +79,7 @@ int testmod_seq_truncated(const void *ctx)
SEC("?raw_tp")
__failure
-__msg("expected an initialized iter_testmod_seq as arg #1")
+__msg("expected an initialized iter_testmod_seq as R2")
int testmod_seq_getter_before_bad(const void *ctx)
{
struct bpf_iter_testmod_seq it;
@@ -89,7 +89,7 @@ int testmod_seq_getter_before_bad(const void *ctx)
SEC("?raw_tp")
__failure
-__msg("expected an initialized iter_testmod_seq as arg #1")
+__msg("expected an initialized iter_testmod_seq as R2")
int testmod_seq_getter_after_bad(const void *ctx)
{
struct bpf_iter_testmod_seq it;
diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
index 6443b320c732..431c218de068 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
@@ -364,7 +364,7 @@ int kptr_xchg_ref_state(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("Possibly NULL pointer passed to helper arg2")
+__failure __msg("Possibly NULL pointer passed to helper R2")
int kptr_xchg_possibly_null(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
index 81813c724fa9..08379c3b6a03 100644
--- a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
@@ -110,7 +110,7 @@ int BPF_PROG(test_array_map_3)
}
SEC("?fentry.s/bpf_fentry_test1")
-__failure __msg("arg#0 expected for bpf_percpu_obj_drop()")
+__failure __msg("R1 expected for bpf_percpu_obj_drop()")
int BPF_PROG(test_array_map_4)
{
struct val_t __percpu_kptr *p;
@@ -124,7 +124,7 @@ int BPF_PROG(test_array_map_4)
}
SEC("?fentry.s/bpf_fentry_test1")
-__failure __msg("arg#0 expected for bpf_obj_drop()")
+__failure __msg("R1 expected for bpf_obj_drop()")
int BPF_PROG(test_array_map_5)
{
struct val_t *p;
diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c
index 70b7baf9304b..555379952dcc 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_fail.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c
@@ -134,7 +134,7 @@ long rbtree_api_remove_no_drop(void *ctx)
}
SEC("?tc")
-__failure __msg("arg#1 expected pointer to allocated object")
+__failure __msg("R2 expected pointer to allocated object")
long rbtree_api_add_to_multiple_trees(void *ctx)
{
struct node_data *n;
@@ -153,7 +153,7 @@ long rbtree_api_add_to_multiple_trees(void *ctx)
}
SEC("?tc")
-__failure __msg("Possibly NULL pointer passed to trusted arg1")
+__failure __msg("Possibly NULL pointer passed to trusted R2")
long rbtree_api_use_unchecked_remove_retval(void *ctx)
{
struct bpf_rb_node *res;
@@ -281,7 +281,7 @@ long add_with_cb(bool (cb)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
}
SEC("?tc")
-__failure __msg("arg#1 expected pointer to allocated object")
+__failure __msg("R2 expected pointer to allocated object")
long rbtree_api_add_bad_cb_bad_fn_call_add(void *ctx)
{
return add_with_cb(less__bad_fn_call_add);
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
index b2808bfcec29..7247a20c0a3b 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
@@ -54,7 +54,7 @@ long rbtree_refcounted_node_ref_escapes(void *ctx)
}
SEC("?tc")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
long refcount_acquire_maybe_null(void *ctx)
{
struct node_acquire *n, *m;
diff --git a/tools/testing/selftests/bpf/progs/stream_fail.c b/tools/testing/selftests/bpf/progs/stream_fail.c
index 8e8249f3521c..21428bb1ee59 100644
--- a/tools/testing/selftests/bpf/progs/stream_fail.c
+++ b/tools/testing/selftests/bpf/progs/stream_fail.c
@@ -23,7 +23,7 @@ int stream_vprintk_scalar_arg(void *ctx)
}
SEC("syscall")
-__failure __msg("arg#1 doesn't point to a const string")
+__failure __msg("R2 doesn't point to a const string")
int stream_vprintk_string_arg(void *ctx)
{
bpf_stream_vprintk(BPF_STDOUT, ctx, NULL, 0);
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
index 4c07ea193f72..41047d81ec42 100644
--- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
@@ -28,7 +28,7 @@ static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *ta
}
SEC("tp_btf/task_newtask")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
@@ -49,7 +49,7 @@ int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_f
}
SEC("tp_btf/task_newtask")
-__failure __msg("arg#0 pointer type STRUCT task_struct must point")
+__failure __msg("R1 pointer type STRUCT task_struct must point")
int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags;
@@ -100,7 +100,7 @@ int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe_rcu, struct task_struct *task,
}
SEC("tp_btf/task_newtask")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
@@ -149,7 +149,7 @@ int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_fla
}
SEC("tp_btf/task_newtask")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(task_kfunc_acquire_release_no_null_check, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
@@ -162,7 +162,7 @@ int BPF_PROG(task_kfunc_acquire_release_no_null_check, struct task_struct *task,
}
SEC("tp_btf/task_newtask")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags)
{
struct __tasks_kfunc_map_value *v;
@@ -178,7 +178,7 @@ int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_f
}
SEC("tp_btf/task_newtask")
-__failure __msg("arg#0 pointer type STRUCT task_struct must point")
+__failure __msg("R1 pointer type STRUCT task_struct must point")
int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired = (struct task_struct *)&clone_flags;
@@ -190,7 +190,7 @@ int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
}
SEC("tp_btf/task_newtask")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
{
struct __tasks_kfunc_map_value local, *v;
@@ -234,7 +234,7 @@ int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_
}
SEC("tp_btf/task_newtask")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
@@ -248,7 +248,7 @@ int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 cl
}
SEC("tp_btf/task_newtask")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(task_kfunc_from_vpid_no_null_check, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
diff --git a/tools/testing/selftests/bpf/progs/task_work_fail.c b/tools/testing/selftests/bpf/progs/task_work_fail.c
index 82e4b8913333..3186e7b4b24e 100644
--- a/tools/testing/selftests/bpf/progs/task_work_fail.c
+++ b/tools/testing/selftests/bpf/progs/task_work_fail.c
@@ -58,7 +58,7 @@ int mismatch_map(struct pt_regs *args)
}
SEC("perf_event")
-__failure __msg("arg#1 doesn't point to a map value")
+__failure __msg("R2 doesn't point to a map value")
int no_map_task_work(struct pt_regs *args)
{
struct task_struct *task;
@@ -70,7 +70,7 @@ int no_map_task_work(struct pt_regs *args)
}
SEC("perf_event")
-__failure __msg("Possibly NULL pointer passed to trusted arg1")
+__failure __msg("Possibly NULL pointer passed to trusted R2")
int task_work_null(struct pt_regs *args)
{
struct task_struct *task;
@@ -81,7 +81,7 @@ int task_work_null(struct pt_regs *args)
}
SEC("perf_event")
-__failure __msg("Possibly NULL pointer passed to trusted arg2")
+__failure __msg("Possibly NULL pointer passed to trusted R3")
int map_null(struct pt_regs *args)
{
struct elem *work;
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
index 2c156cd166af..332cda89caba 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
@@ -152,7 +152,7 @@ int change_status_after_alloc(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("Possibly NULL pointer passed to trusted arg1")
+__failure __msg("Possibly NULL pointer passed to trusted R2")
int lookup_null_bpf_tuple(struct __sk_buff *ctx)
{
struct bpf_ct_opts___local opts = {};
@@ -165,7 +165,7 @@ int lookup_null_bpf_tuple(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("Possibly NULL pointer passed to trusted arg3")
+__failure __msg("Possibly NULL pointer passed to trusted R4")
int lookup_null_bpf_opts(struct __sk_buff *ctx)
{
struct bpf_sock_tuple tup = {};
@@ -178,7 +178,7 @@ int lookup_null_bpf_opts(struct __sk_buff *ctx)
}
SEC("?xdp")
-__failure __msg("Possibly NULL pointer passed to trusted arg1")
+__failure __msg("Possibly NULL pointer passed to trusted R2")
int xdp_lookup_null_bpf_tuple(struct xdp_md *ctx)
{
struct bpf_ct_opts___local opts = {};
@@ -191,7 +191,7 @@ int xdp_lookup_null_bpf_tuple(struct xdp_md *ctx)
}
SEC("?xdp")
-__failure __msg("Possibly NULL pointer passed to trusted arg3")
+__failure __msg("Possibly NULL pointer passed to trusted R4")
int xdp_lookup_null_bpf_opts(struct xdp_md *ctx)
{
struct bpf_sock_tuple tup = {};
diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
index d249113ed657..41da6e619940 100644
--- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
+++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
@@ -45,7 +45,7 @@ int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size,
}
SEC("?lsm.s/bpf")
-__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr")
+__failure __msg("R1 expected pointer to stack or const struct bpf_dynptr")
int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
static struct bpf_dynptr val;
diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
index 967081bbcfe1..ca35b92ea095 100644
--- a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
+++ b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
@@ -29,7 +29,7 @@ int kfunc_dynptr_nullable_test2(struct __sk_buff *skb)
}
SEC("tc")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int kfunc_dynptr_nullable_test3(struct __sk_buff *skb)
{
struct bpf_dynptr data;
diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
index 8bcddadfc4da..dd97f2027505 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
@@ -32,7 +32,7 @@ int BPF_PROG(no_destroy, struct bpf_iter_meta *meta, struct cgroup *cgrp)
SEC("iter/cgroup")
__description("uninitialized iter in ->next()")
-__failure __msg("expected an initialized iter_bits as arg #0")
+__failure __msg("expected an initialized iter_bits as R1")
int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
{
struct bpf_iter_bits it = {};
@@ -43,7 +43,7 @@ int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
SEC("iter/cgroup")
__description("uninitialized iter in ->destroy()")
-__failure __msg("expected an initialized iter_bits as arg #0")
+__failure __msg("expected an initialized iter_bits as R1")
int BPF_PROG(destroy_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
{
struct bpf_iter_bits it = {};
diff --git a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
index 910365201f68..139f70bb3595 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
@@ -263,7 +263,7 @@ l0_%=: r0 = 0; \
SEC("lsm.s/bpf")
__description("reference tracking: release user key reference without check")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
__naked void user_key_reference_without_check(void)
{
asm volatile (" \
@@ -282,7 +282,7 @@ __naked void user_key_reference_without_check(void)
SEC("lsm.s/bpf")
__description("reference tracking: release system key reference without check")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
__naked void system_key_reference_without_check(void)
{
asm volatile (" \
@@ -300,7 +300,7 @@ __naked void system_key_reference_without_check(void)
SEC("lsm.s/bpf")
__description("reference tracking: release with NULL key pointer")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
__naked void release_with_null_key_pointer(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
index 4b392c6c8fc4..0990de076844 100644
--- a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
+++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
@@ -13,7 +13,7 @@
static char buf[PATH_MAX];
SEC("lsm.s/file_open")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(get_task_exe_file_kfunc_null)
{
struct file *acquired;
@@ -28,7 +28,7 @@ int BPF_PROG(get_task_exe_file_kfunc_null)
}
SEC("lsm.s/inode_getxattr")
-__failure __msg("arg#0 pointer type STRUCT task_struct must point to scalar, or struct with scalar")
+__failure __msg("R1 pointer type STRUCT task_struct must point to scalar, or struct with scalar")
int BPF_PROG(get_task_exe_file_kfunc_fp)
{
u64 x;
@@ -89,7 +89,7 @@ int BPF_PROG(put_file_kfunc_unacquired, struct file *file)
}
SEC("lsm.s/file_open")
-__failure __msg("Possibly NULL pointer passed to trusted arg0")
+__failure __msg("Possibly NULL pointer passed to trusted R1")
int BPF_PROG(path_d_path_kfunc_null)
{
/* Can't pass NULL value to bpf_path_d_path() kfunc. */
@@ -128,7 +128,7 @@ int BPF_PROG(path_d_path_kfunc_untrusted_from_current)
}
SEC("lsm.s/file_open")
-__failure __msg("kernel function bpf_path_d_path args#0 expected pointer to STRUCT path but R1 has a pointer to STRUCT file")
+__failure __msg("kernel function bpf_path_d_path R1 expected pointer to STRUCT path but R1 has a pointer to STRUCT file")
int BPF_PROG(path_d_path_kfunc_type_mismatch, struct file *file)
{
bpf_path_d_path((struct path *)&file->f_task_work, buf, sizeof(buf));
diff --git a/tools/testing/selftests/bpf/progs/wq_failures.c b/tools/testing/selftests/bpf/progs/wq_failures.c
index 3767f5595bbc..32dc8827e128 100644
--- a/tools/testing/selftests/bpf/progs/wq_failures.c
+++ b/tools/testing/selftests/bpf/progs/wq_failures.c
@@ -98,7 +98,7 @@ __failure
* is a correct bpf_wq pointer.
*/
__msg(": (85) call bpf_wq_set_callback#") /* anchor message */
-__msg("arg#0 doesn't point to a map value")
+__msg("R1 doesn't point to a map value")
long test_wrong_wq_pointer(void *ctx)
{
int key = 0;
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index c3164b9b2be5..0bb4337552c8 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -31,7 +31,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar",
+ .errstr = "R1 pointer type STRUCT prog_test_fail1 must point to scalar",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_fail1", 2 },
},
@@ -46,7 +46,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2",
+ .errstr = "max struct nesting depth exceeded\nR1 pointer type STRUCT prog_test_fail2",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_fail2", 2 },
},
@@ -61,7 +61,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "arg#0 pointer type STRUCT prog_test_fail3 must point to scalar",
+ .errstr = "R1 pointer type STRUCT prog_test_fail3 must point to scalar",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_fail3", 2 },
},
@@ -76,7 +76,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "arg#0 expected pointer to ctx, but got fp",
+ .errstr = "R1 expected pointer to ctx, but got fp",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_pass_ctx", 2 },
},
@@ -91,7 +91,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "arg#0 pointer type UNKNOWN must point to scalar",
+ .errstr = "R1 pointer type UNKNOWN must point to scalar",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_mem_len_fail1", 2 },
},
@@ -109,7 +109,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "Possibly NULL pointer passed to trusted arg0",
+ .errstr = "Possibly NULL pointer passed to trusted R1",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 3 },
{ "bpf_kfunc_call_test_release", 5 },
@@ -152,7 +152,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
+ .errstr = "kernel function bpf_kfunc_call_memb1_release R1 expected pointer",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_memb_acquire", 1 },
{ "bpf_kfunc_call_memb1_release", 5 },
--
2.52.0
next prev parent reply other threads:[~2026-04-17 3:47 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-17 3:46 [PATCH bpf-next v5 00/16] bpf: Support stack arguments for BPF functions and kfuncs Yonghong Song
2026-04-17 3:47 ` [PATCH bpf-next v5 01/16] bpf: Remove unused parameter from check_map_kptr_access() Yonghong Song
2026-04-17 3:47 ` [PATCH bpf-next v5 02/16] bpf: Refactor to avoid redundant calculation of bpf_reg_state Yonghong Song
2026-04-17 3:47 ` [PATCH bpf-next v5 03/16] bpf: Refactor to handle memory and size together Yonghong Song
2026-04-17 4:49 ` sashiko-bot
2026-04-17 3:47 ` Yonghong Song [this message]
2026-04-17 3:47 ` [PATCH bpf-next v5 05/16] bpf: Introduce bpf register BPF_REG_PARAMS Yonghong Song
2026-04-17 3:47 ` [PATCH bpf-next v5 06/16] bpf: Limit the scope of BPF_REG_PARAMS usage Yonghong Song
2026-04-17 4:30 ` bot+bpf-ci
2026-04-17 4:50 ` sashiko-bot
2026-04-17 3:47 ` [PATCH bpf-next v5 07/16] bpf: Reuse MAX_BPF_FUNC_ARGS for maximum number of arguments Yonghong Song
2026-04-17 4:30 ` bot+bpf-ci
2026-04-17 3:47 ` [PATCH bpf-next v5 08/16] bpf: Support stack arguments for bpf functions Yonghong Song
2026-04-17 4:35 ` sashiko-bot
2026-04-17 4:43 ` bot+bpf-ci
2026-04-17 3:47 ` [PATCH bpf-next v5 09/16] bpf: Reject stack arguments in non-JITed programs Yonghong Song
2026-04-17 4:30 ` bot+bpf-ci
2026-04-17 3:47 ` [PATCH bpf-next v5 10/16] bpf: Reject stack arguments if tail call reachable Yonghong Song
2026-04-17 4:08 ` sashiko-bot
2026-04-17 4:30 ` bot+bpf-ci
2026-04-17 3:47 ` [PATCH bpf-next v5 11/16] bpf: Support stack arguments for kfunc calls Yonghong Song
2026-04-17 4:40 ` sashiko-bot
2026-04-17 4:43 ` bot+bpf-ci
2026-04-17 3:47 ` [PATCH bpf-next v5 12/16] bpf: Enable stack argument support for x86_64 Yonghong Song
2026-04-17 4:30 ` bot+bpf-ci
2026-04-17 5:03 ` sashiko-bot
2026-04-17 3:48 ` [PATCH bpf-next v5 13/16] bpf,x86: Implement JIT support for stack arguments Yonghong Song
2026-04-17 4:44 ` sashiko-bot
2026-04-17 3:48 ` [PATCH bpf-next v5 14/16] selftests/bpf: Add tests for BPF function " Yonghong Song
2026-04-17 4:20 ` sashiko-bot
2026-04-17 3:48 ` [PATCH bpf-next v5 15/16] selftests/bpf: Add negative test for greater-than-8-byte kfunc stack argument Yonghong Song
2026-04-17 4:28 ` sashiko-bot
2026-04-17 3:48 ` [PATCH bpf-next v5 16/16] selftests/bpf: Add verifier tests for stack argument validation Yonghong Song
2026-04-17 4:38 ` sashiko-bot
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260417034719.2627762-1-yonghong.song@linux.dev \
--to=yonghong.song@linux.dev \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=jose.marchesi@oracle.com \
--cc=kernel-team@fb.com \
--cc=martin.lau@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox