* [PATCH bpf-next 01/10] bpf: rearrange bpf_func_state fields to save a bit of memory
2023-11-22 1:16 [PATCH bpf-next 00/10] BPF verifier retval logic fixes Andrii Nakryiko
@ 2023-11-22 1:16 ` Andrii Nakryiko
2023-11-22 15:12 ` Eduard Zingerman
2023-11-22 1:16 ` [PATCH bpf-next 02/10] bpf: provide correct register name for exception callback retval check Andrii Nakryiko
` (8 subsequent siblings)
9 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-22 1:16 UTC (permalink / raw)
To: bpf, ast, daniel, martin.lau; +Cc: andrii, kernel-team
It's a trivial rearrangement saving 8 bytes. We have 4 bytes of padding
at the end which can be filled with another field without increasing
struct bpf_func_state.
copy_func_state() logic remains correct without any further changes.
BEFORE
======
struct bpf_func_state {
struct bpf_reg_state regs[11]; /* 0 1320 */
/* --- cacheline 20 boundary (1280 bytes) was 40 bytes ago --- */
int callsite; /* 1320 4 */
u32 frameno; /* 1324 4 */
u32 subprogno; /* 1328 4 */
u32 async_entry_cnt; /* 1332 4 */
bool in_callback_fn; /* 1336 1 */
/* XXX 7 bytes hole, try to pack */
/* --- cacheline 21 boundary (1344 bytes) --- */
struct tnum callback_ret_range; /* 1344 16 */
bool in_async_callback_fn; /* 1360 1 */
bool in_exception_callback_fn; /* 1361 1 */
/* XXX 2 bytes hole, try to pack */
int acquired_refs; /* 1364 4 */
struct bpf_reference_state * refs; /* 1368 8 */
int allocated_stack; /* 1376 4 */
/* XXX 4 bytes hole, try to pack */
struct bpf_stack_state * stack; /* 1384 8 */
/* size: 1392, cachelines: 22, members: 13 */
/* sum members: 1379, holes: 3, sum holes: 13 */
/* last cacheline: 48 bytes */
};
AFTER
=====
struct bpf_func_state {
struct bpf_reg_state regs[11]; /* 0 1320 */
/* --- cacheline 20 boundary (1280 bytes) was 40 bytes ago --- */
int callsite; /* 1320 4 */
u32 frameno; /* 1324 4 */
u32 subprogno; /* 1328 4 */
u32 async_entry_cnt; /* 1332 4 */
struct tnum callback_ret_range; /* 1336 16 */
/* --- cacheline 21 boundary (1344 bytes) was 8 bytes ago --- */
bool in_callback_fn; /* 1352 1 */
bool in_async_callback_fn; /* 1353 1 */
bool in_exception_callback_fn; /* 1354 1 */
/* XXX 1 byte hole, try to pack */
int acquired_refs; /* 1356 4 */
struct bpf_reference_state * refs; /* 1360 8 */
struct bpf_stack_state * stack; /* 1368 8 */
int allocated_stack; /* 1376 4 */
/* size: 1384, cachelines: 22, members: 13 */
/* sum members: 1379, holes: 1, sum holes: 1 */
/* padding: 4 */
/* last cacheline: 40 bytes */
};
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
include/linux/bpf_verifier.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 39edc76f436e..f019da8bf423 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -297,16 +297,16 @@ struct bpf_func_state {
* void foo(void) { bpf_timer_set_callback(,foo); }
*/
u32 async_entry_cnt;
- bool in_callback_fn;
struct tnum callback_ret_range;
+ bool in_callback_fn;
bool in_async_callback_fn;
bool in_exception_callback_fn;
/* The following fields should be last. See copy_func_state() */
int acquired_refs;
struct bpf_reference_state *refs;
- int allocated_stack;
struct bpf_stack_state *stack;
+ int allocated_stack;
};
struct bpf_idx_pair {
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread
* [PATCH bpf-next 02/10] bpf: provide correct register name for exception callback retval check
2023-11-22 1:16 [PATCH bpf-next 00/10] BPF verifier retval logic fixes Andrii Nakryiko
2023-11-22 1:16 ` [PATCH bpf-next 01/10] bpf: rearrange bpf_func_state fields to save a bit of memory Andrii Nakryiko
@ 2023-11-22 1:16 ` Andrii Nakryiko
2023-11-22 15:12 ` Eduard Zingerman
2023-11-23 1:44 ` Alexei Starovoitov
2023-11-22 1:16 ` [PATCH bpf-next 03/10] bpf: enforce precision of R0 on callback return Andrii Nakryiko
` (7 subsequent siblings)
9 siblings, 2 replies; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-22 1:16 UTC (permalink / raw)
To: bpf, ast, daniel, martin.lau; +Cc: andrii, kernel-team
bpf_throw() is checking R1, so let's report R1 in the log.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
kernel/bpf/verifier.c | 12 ++++++------
.../testing/selftests/bpf/progs/exceptions_assert.c | 2 +-
tools/testing/selftests/bpf/progs/exceptions_fail.c | 2 +-
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8c2d31aa3d31..a921dba4f603 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11663,7 +11663,7 @@ static int fetch_kfunc_meta(struct bpf_verifier_env *env,
return 0;
}
-static int check_return_code(struct bpf_verifier_env *env, int regno);
+static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name);
static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx_p)
@@ -11799,7 +11799,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
* to bpf_throw becomes the return value of the program.
*/
if (!env->exception_callback_subprog) {
- err = check_return_code(env, BPF_REG_1);
+ err = check_return_code(env, BPF_REG_1, "R1");
if (err < 0)
return err;
}
@@ -14818,7 +14818,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0;
}
-static int check_return_code(struct bpf_verifier_env *env, int regno)
+static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name)
{
struct tnum enforce_attach_type_range = tnum_unknown;
const struct bpf_prog *prog = env->prog;
@@ -14872,7 +14872,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno)
}
if (!tnum_in(const_0, reg->var_off)) {
- verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0");
+ verbose_invalid_scalar(env, reg, &const_0, "async callback", reg_name);
return -EINVAL;
}
return 0;
@@ -14972,7 +14972,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno)
}
if (!tnum_in(range, reg->var_off)) {
- verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
+ verbose_invalid_scalar(env, reg, &range, "program exit", reg_name);
if (prog->expected_attach_type == BPF_LSM_CGROUP &&
prog_type == BPF_PROG_TYPE_LSM &&
!prog->aux->attach_func_proto->type)
@@ -17220,7 +17220,7 @@ static int do_check(struct bpf_verifier_env *env)
continue;
}
- err = check_return_code(env, BPF_REG_0);
+ err = check_return_code(env, BPF_REG_0, "R0");
if (err)
return err;
process_bpf_exit:
diff --git a/tools/testing/selftests/bpf/progs/exceptions_assert.c b/tools/testing/selftests/bpf/progs/exceptions_assert.c
index 49efaed143fc..575e7dd719c4 100644
--- a/tools/testing/selftests/bpf/progs/exceptions_assert.c
+++ b/tools/testing/selftests/bpf/progs/exceptions_assert.c
@@ -125,7 +125,7 @@ int check_assert_generic(struct __sk_buff *ctx)
}
SEC("?fentry/bpf_check")
-__failure __msg("At program exit the register R0 has value (0x40; 0x0)")
+__failure __msg("At program exit the register R1 has value (0x40; 0x0)")
int check_assert_with_return(void *ctx)
{
bpf_assert_with(!ctx, 64);
diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c
index 4c39e920dac2..69a8fb53ea1d 100644
--- a/tools/testing/selftests/bpf/progs/exceptions_fail.c
+++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c
@@ -306,7 +306,7 @@ int reject_set_exception_cb_bad_ret1(void *ctx)
}
SEC("?fentry/bpf_check")
-__failure __msg("At program exit the register R0 has value (0x40; 0x0) should")
+__failure __msg("At program exit the register R1 has value (0x40; 0x0) should")
int reject_set_exception_cb_bad_ret2(void *ctx)
{
bpf_throw(64);
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread
* Re: [PATCH bpf-next 02/10] bpf: provide correct register name for exception callback retval check
2023-11-22 1:16 ` [PATCH bpf-next 02/10] bpf: provide correct register name for exception callback retval check Andrii Nakryiko
@ 2023-11-22 15:12 ` Eduard Zingerman
2023-11-23 1:44 ` Alexei Starovoitov
1 sibling, 0 replies; 27+ messages in thread
From: Eduard Zingerman @ 2023-11-22 15:12 UTC (permalink / raw)
To: Andrii Nakryiko, bpf, ast, daniel, martin.lau; +Cc: kernel-team
On Tue, 2023-11-21 at 17:16 -0800, Andrii Nakryiko wrote:
> bpf_throw() is checking R1, so let's report R1 in the log.
>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
[...]
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH bpf-next 02/10] bpf: provide correct register name for exception callback retval check
2023-11-22 1:16 ` [PATCH bpf-next 02/10] bpf: provide correct register name for exception callback retval check Andrii Nakryiko
2023-11-22 15:12 ` Eduard Zingerman
@ 2023-11-23 1:44 ` Alexei Starovoitov
2023-11-24 3:39 ` Andrii Nakryiko
1 sibling, 1 reply; 27+ messages in thread
From: Alexei Starovoitov @ 2023-11-23 1:44 UTC (permalink / raw)
To: Andrii Nakryiko; +Cc: bpf, ast, daniel, martin.lau, kernel-team
On Tue, Nov 21, 2023 at 05:16:48PM -0800, Andrii Nakryiko wrote:
> SEC("?fentry/bpf_check")
> -__failure __msg("At program exit the register R0 has value (0x40; 0x0)")
> +__failure __msg("At program exit the register R1 has value (0x40; 0x0)")
While at it...
should we change tnum_strn() to print it as normal constant when mask==0 ?
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH bpf-next 02/10] bpf: provide correct register name for exception callback retval check
2023-11-23 1:44 ` Alexei Starovoitov
@ 2023-11-24 3:39 ` Andrii Nakryiko
0 siblings, 0 replies; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-24 3:39 UTC (permalink / raw)
To: Alexei Starovoitov
Cc: Andrii Nakryiko, bpf, ast, daniel, martin.lau, kernel-team
On Wed, Nov 22, 2023 at 5:44 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Tue, Nov 21, 2023 at 05:16:48PM -0800, Andrii Nakryiko wrote:
> > SEC("?fentry/bpf_check")
> > -__failure __msg("At program exit the register R0 has value (0x40; 0x0)")
> > +__failure __msg("At program exit the register R1 has value (0x40; 0x0)")
>
> While at it...
> should we change tnum_strn() to print it as normal constant when mask==0 ?
sure, I can add that as another patch, because I suspect even more
tests would have to be adjusted
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH bpf-next 03/10] bpf: enforce precision of R0 on callback return
2023-11-22 1:16 [PATCH bpf-next 00/10] BPF verifier retval logic fixes Andrii Nakryiko
2023-11-22 1:16 ` [PATCH bpf-next 01/10] bpf: rearrange bpf_func_state fields to save a bit of memory Andrii Nakryiko
2023-11-22 1:16 ` [PATCH bpf-next 02/10] bpf: provide correct register name for exception callback retval check Andrii Nakryiko
@ 2023-11-22 1:16 ` Andrii Nakryiko
2023-11-22 15:12 ` Eduard Zingerman
2023-11-22 1:16 ` [PATCH bpf-next 04/10] bpf: enforce exact retval range on subprog/callback exit Andrii Nakryiko
` (6 subsequent siblings)
9 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-22 1:16 UTC (permalink / raw)
To: bpf, ast, daniel, martin.lau; +Cc: andrii, kernel-team
Given verifier checks actual value, r0 has to be precise, so we need to
propagate precision properly. r0 also has to be marked as read,
otherwise subsequent state comparisons will ignore such register as
unimportant and precision won't really help here.
Fixes: 69c087ba6225 ("bpf: Add bpf_for_each_map_elem() helper")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
kernel/bpf/verifier.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a921dba4f603..b227f23e063d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -9493,6 +9493,13 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
verbose(env, "R0 not a scalar value\n");
return -EACCES;
}
+
+ /* we are going to rely on register's precise value */
+ err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64);
+ err = err ?: mark_chain_precision(env, BPF_REG_0);
+ if (err)
+ return err;
+
if (!tnum_in(range, r0->var_off)) {
verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
return -EINVAL;
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread
* Re: [PATCH bpf-next 03/10] bpf: enforce precision of R0 on callback return
2023-11-22 1:16 ` [PATCH bpf-next 03/10] bpf: enforce precision of R0 on callback return Andrii Nakryiko
@ 2023-11-22 15:12 ` Eduard Zingerman
0 siblings, 0 replies; 27+ messages in thread
From: Eduard Zingerman @ 2023-11-22 15:12 UTC (permalink / raw)
To: Andrii Nakryiko, bpf, ast, daniel, martin.lau; +Cc: kernel-team
On Tue, 2023-11-21 at 17:16 -0800, Andrii Nakryiko wrote:
> Given verifier checks actual value, r0 has to be precise, so we need to
> propagate precision properly. r0 also has to be marked as read,
> otherwise subsequent state comparisons will ignore such register as
> unimportant and precision won't really help here.
>
> Fixes: 69c087ba6225 ("bpf: Add bpf_for_each_map_elem() helper")
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
[...]
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH bpf-next 04/10] bpf: enforce exact retval range on subprog/callback exit
2023-11-22 1:16 [PATCH bpf-next 00/10] BPF verifier retval logic fixes Andrii Nakryiko
` (2 preceding siblings ...)
2023-11-22 1:16 ` [PATCH bpf-next 03/10] bpf: enforce precision of R0 on callback return Andrii Nakryiko
@ 2023-11-22 1:16 ` Andrii Nakryiko
2023-11-22 15:13 ` Eduard Zingerman
2023-11-22 1:16 ` [PATCH bpf-next 05/10] selftests/bpf: add selftest validating callback result is enforced Andrii Nakryiko
` (5 subsequent siblings)
9 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-22 1:16 UTC (permalink / raw)
To: bpf, ast, daniel, martin.lau; +Cc: andrii, kernel-team
Instead of relying on potentially imprecise tnum representation of
expected return value range for callbacks and subprogs, validate that
both tnum and umin/umax range satisfy exact expected range of return
values.
E.g., if callback would need to return [0, 2] range, tnum can't
represent this precisely and instead will allow [0, 3] range. By
additionally checking umin/umax range, we can make sure that
subprog/callback indeed returns only valid [0, 2] range.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
include/linux/bpf_verifier.h | 7 +++++-
kernel/bpf/verifier.c | 45 +++++++++++++++++++++++++++---------
2 files changed, 40 insertions(+), 12 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index f019da8bf423..00702a3cca62 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -275,6 +275,11 @@ struct bpf_reference_state {
int callback_ref;
};
+struct bpf_retval_range {
+ u32 minval;
+ u32 maxval;
+};
+
/* state of the program:
* type of all registers and stack info
*/
@@ -297,7 +302,7 @@ struct bpf_func_state {
* void foo(void) { bpf_timer_set_callback(,foo); }
*/
u32 async_entry_cnt;
- struct tnum callback_ret_range;
+ struct bpf_retval_range callback_ret_range;
bool in_callback_fn;
bool in_async_callback_fn;
bool in_exception_callback_fn;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b227f23e063d..fc103fd03896 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2272,6 +2272,19 @@ static void init_reg_state(struct bpf_verifier_env *env,
regs[BPF_REG_FP].frameno = state->frameno;
}
+static struct bpf_retval_range retval_range(u32 minval, u32 maxval)
+{
+ return (struct bpf_retval_range){ minval, maxval };
+}
+
+static struct tnum retval_range_as_tnum(struct bpf_retval_range range)
+{
+ if (range.minval == range.maxval)
+ return tnum_const(range.minval);
+ else
+ return tnum_range(range.minval, range.maxval);
+}
+
#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
struct bpf_func_state *state,
@@ -2280,7 +2293,7 @@ static void init_func_state(struct bpf_verifier_env *env,
state->callsite = callsite;
state->frameno = frameno;
state->subprogno = subprogno;
- state->callback_ret_range = tnum_range(0, 0);
+ state->callback_ret_range = retval_range(0, 0);
init_reg_state(env, state);
mark_verifier_state_scratched(env);
}
@@ -9300,7 +9313,7 @@ static int set_map_elem_callback_state(struct bpf_verifier_env *env,
return err;
callee->in_callback_fn = true;
- callee->callback_ret_range = tnum_range(0, 1);
+ callee->callback_ret_range = retval_range(0, 1);
return 0;
}
@@ -9322,7 +9335,7 @@ static int set_loop_callback_state(struct bpf_verifier_env *env,
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_callback_fn = true;
- callee->callback_ret_range = tnum_range(0, 1);
+ callee->callback_ret_range = retval_range(0, 1);
return 0;
}
@@ -9352,7 +9365,7 @@ static int set_timer_callback_state(struct bpf_verifier_env *env,
__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_async_callback_fn = true;
- callee->callback_ret_range = tnum_range(0, 1);
+ callee->callback_ret_range = retval_range(0, 1);
return 0;
}
@@ -9380,7 +9393,7 @@ static int set_find_vma_callback_state(struct bpf_verifier_env *env,
__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_callback_fn = true;
- callee->callback_ret_range = tnum_range(0, 1);
+ callee->callback_ret_range = retval_range(0, 1);
return 0;
}
@@ -9403,7 +9416,7 @@ static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_callback_fn = true;
- callee->callback_ret_range = tnum_range(0, 1);
+ callee->callback_ret_range = retval_range(0, 1);
return 0;
}
@@ -9435,7 +9448,7 @@ static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_callback_fn = true;
- callee->callback_ret_range = tnum_range(0, 1);
+ callee->callback_ret_range = retval_range(0, 1);
return 0;
}
@@ -9464,6 +9477,16 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
return is_rbtree_lock_required_kfunc(kfunc_btf_id);
}
+static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
+{
+ struct tnum trange = retval_range_as_tnum(range);
+
+ if (!tnum_in(trange, reg->var_off))
+ return false;
+
+ return range.minval <= reg->umin_value && reg->umax_value <= range.maxval;
+}
+
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
struct bpf_verifier_state *state = env->cur_state;
@@ -9486,9 +9509,6 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
caller = state->frame[state->curframe - 1];
if (callee->in_callback_fn) {
- /* enforce R0 return value range [0, 1]. */
- struct tnum range = callee->callback_ret_range;
-
if (r0->type != SCALAR_VALUE) {
verbose(env, "R0 not a scalar value\n");
return -EACCES;
@@ -9500,7 +9520,10 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
if (err)
return err;
- if (!tnum_in(range, r0->var_off)) {
+ /* enforce R0 return value range */
+ if (!retval_range_within(callee->callback_ret_range, r0)) {
+ struct tnum range = retval_range_as_tnum(callee->callback_ret_range);
+
verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
return -EINVAL;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread
* Re: [PATCH bpf-next 04/10] bpf: enforce exact retval range on subprog/callback exit
2023-11-22 1:16 ` [PATCH bpf-next 04/10] bpf: enforce exact retval range on subprog/callback exit Andrii Nakryiko
@ 2023-11-22 15:13 ` Eduard Zingerman
2023-11-22 17:45 ` Andrii Nakryiko
0 siblings, 1 reply; 27+ messages in thread
From: Eduard Zingerman @ 2023-11-22 15:13 UTC (permalink / raw)
To: Andrii Nakryiko, bpf, ast, daniel, martin.lau; +Cc: kernel-team
On Tue, 2023-11-21 at 17:16 -0800, Andrii Nakryiko wrote:
> Instead of relying on potentially imprecise tnum representation of
> expected return value range for callbacks and subprogs, validate that
> both tnum and umin/umax range satisfy exact expected range of return
> values.
>
> E.g., if callback would need to return [0, 2] range, tnum can't
> represent this precisely and instead will allow [0, 3] range. By
> additionally checking umin/umax range, we can make sure that
> subprog/callback indeed returns only valid [0, 2] range.
>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
(but please see a question below)
[...]
> @@ -9464,6 +9477,16 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
> return is_rbtree_lock_required_kfunc(kfunc_btf_id);
> }
>
> +static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
> +{
> + struct tnum trange = retval_range_as_tnum(range);
> +
> + if (!tnum_in(trange, reg->var_off))
> + return false;
Q: When is it necessary to do this check?
I tried commenting it and test_{verifier,progs} still pass.
Are there situations when umin/umax change is not sufficient?
> +
> + return range.minval <= reg->umin_value && reg->umax_value <= range.maxval;
> +}
> +
[...]
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH bpf-next 04/10] bpf: enforce exact retval range on subprog/callback exit
2023-11-22 15:13 ` Eduard Zingerman
@ 2023-11-22 17:45 ` Andrii Nakryiko
2023-11-27 10:55 ` Shung-Hsi Yu
0 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-22 17:45 UTC (permalink / raw)
To: Eduard Zingerman
Cc: Andrii Nakryiko, bpf, ast, daniel, martin.lau, kernel-team
On Wed, Nov 22, 2023 at 7:13 AM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Tue, 2023-11-21 at 17:16 -0800, Andrii Nakryiko wrote:
> > Instead of relying on potentially imprecise tnum representation of
> > expected return value range for callbacks and subprogs, validate that
> > both tnum and umin/umax range satisfy exact expected range of return
> > values.
> >
> > E.g., if callback would need to return [0, 2] range, tnum can't
> > represent this precisely and instead will allow [0, 3] range. By
> > additionally checking umin/umax range, we can make sure that
> > subprog/callback indeed returns only valid [0, 2] range.
> >
> > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > ---
>
> Acked-by: Eduard Zingerman <eddyz87@gmail.com>
> (but please see a question below)
>
> [...]
>
> > @@ -9464,6 +9477,16 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
> > return is_rbtree_lock_required_kfunc(kfunc_btf_id);
> > }
> >
> > +static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
> > +{
> > + struct tnum trange = retval_range_as_tnum(range);
> > +
> > + if (!tnum_in(trange, reg->var_off))
> > + return false;
>
> Q: When is it necessary to do this check?
> I tried commenting it and test_{verifier,progs} still pass.
> Are there situations when umin/umax change is not sufficient?
I believe not. But we still check tnum in check_cond_jmp_op, for
example, so I decided to keep it to not have to argue and prove why
it's ok to ditch tnum.
Generally speaking, I think tnum is useful in only one use case:
checking (un)aligned memory accesses. This is the only representation
that can make sure we have lower 2-3 bits as zero to prove that memory
access is 4- or 8-byte aligned.
Other than this, I think ranges are more precise and easier to work with.
But I'm not ready to go on the quest to eliminate tnum usage everywhere :)
>
> > +
> > + return range.minval <= reg->umin_value && reg->umax_value <= range.maxval;
> > +}
> > +
>
> [...]
>
>
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH bpf-next 04/10] bpf: enforce exact retval range on subprog/callback exit
2023-11-22 17:45 ` Andrii Nakryiko
@ 2023-11-27 10:55 ` Shung-Hsi Yu
2023-11-27 18:19 ` Andrii Nakryiko
0 siblings, 1 reply; 27+ messages in thread
From: Shung-Hsi Yu @ 2023-11-27 10:55 UTC (permalink / raw)
To: Andrii Nakryiko
Cc: Eduard Zingerman, Andrii Nakryiko, bpf, ast, daniel, martin.lau,
kernel-team
On Wed, Nov 22, 2023 at 09:45:27AM -0800, Andrii Nakryiko wrote:
> On Wed, Nov 22, 2023 at 7:13 AM Eduard Zingerman <eddyz87@gmail.com> wrote:
> >
> > On Tue, 2023-11-21 at 17:16 -0800, Andrii Nakryiko wrote:
> > > Instead of relying on potentially imprecise tnum representation of
> > > expected return value range for callbacks and subprogs, validate that
> > > both tnum and umin/umax range satisfy exact expected range of return
> > > values.
> > >
> > > E.g., if callback would need to return [0, 2] range, tnum can't
> > > represent this precisely and instead will allow [0, 3] range. By
> > > additionally checking umin/umax range, we can make sure that
> > > subprog/callback indeed returns only valid [0, 2] range.
> > >
> > > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > > ---
> >
> > Acked-by: Eduard Zingerman <eddyz87@gmail.com>
> > (but please see a question below)
> >
> > [...]
> >
> > > +static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
> > > +{
> > > + struct tnum trange = retval_range_as_tnum(range);
> > > +
> > > + if (!tnum_in(trange, reg->var_off))
> > > + return false;
> >
> > Q: When is it necessary to do this check?
> > I tried commenting it and test_{verifier,progs} still pass.
> > Are there situations when umin/umax change is not sufficient?
>
> I believe not. But we still check tnum in check_cond_jmp_op, for
> example, so I decided to keep it to not have to argue and prove why
> it's ok to ditch tnum.
Semi-related proof[1] from awhile back :)
> Generally speaking, I think tnum is useful in only one use case:
> checking (un)aligned memory accesses. This is the only representation
> that can make sure we have lower 2-3 bits as zero to prove that memory
> access is 4- or 8-byte aligned.
>
> Other than this, I think ranges are more precise and easier to work with.
Agree with the above.
I'd vote for ditching tnum for the retval check here. With umin/umax
check in place there really isn't a need for an additional tnum check at
all. Keeping it probably does more harm (in the form of confusion) than
good.
1: https://lore.kernel.org/bpf/20220831031907.16133-3-shung-hsi.yu@suse.com/
> But I'm not ready to go on the quest to eliminate tnum usage everywhere :)
>
> > > +
> > > + return range.minval <= reg->umin_value && reg->umax_value <= range.maxval;
> > > +}
> > > +
> >
> > [...]
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH bpf-next 04/10] bpf: enforce exact retval range on subprog/callback exit
2023-11-27 10:55 ` Shung-Hsi Yu
@ 2023-11-27 18:19 ` Andrii Nakryiko
0 siblings, 0 replies; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-27 18:19 UTC (permalink / raw)
To: Shung-Hsi Yu
Cc: Eduard Zingerman, Andrii Nakryiko, bpf, ast, daniel, martin.lau,
kernel-team
On Mon, Nov 27, 2023 at 2:55 AM Shung-Hsi Yu <shung-hsi.yu@suse.com> wrote:
>
> On Wed, Nov 22, 2023 at 09:45:27AM -0800, Andrii Nakryiko wrote:
> > On Wed, Nov 22, 2023 at 7:13 AM Eduard Zingerman <eddyz87@gmail.com> wrote:
> > >
> > > On Tue, 2023-11-21 at 17:16 -0800, Andrii Nakryiko wrote:
> > > > Instead of relying on potentially imprecise tnum representation of
> > > > expected return value range for callbacks and subprogs, validate that
> > > > both tnum and umin/umax range satisfy exact expected range of return
> > > > values.
> > > >
> > > > E.g., if callback would need to return [0, 2] range, tnum can't
> > > > represent this precisely and instead will allow [0, 3] range. By
> > > > additionally checking umin/umax range, we can make sure that
> > > > subprog/callback indeed returns only valid [0, 2] range.
> > > >
> > > > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > > > ---
> > >
> > > Acked-by: Eduard Zingerman <eddyz87@gmail.com>
> > > (but please see a question below)
> > >
> > > [...]
> > >
> > > > +static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
> > > > +{
> > > > + struct tnum trange = retval_range_as_tnum(range);
> > > > +
> > > > + if (!tnum_in(trange, reg->var_off))
> > > > + return false;
> > >
> > > Q: When is it necessary to do this check?
> > > I tried commenting it and test_{verifier,progs} still pass.
> > > Are there situations when umin/umax change is not sufficient?
> >
> > I believe not. But we still check tnum in check_cond_jmp_op, for
> > example, so I decided to keep it to not have to argue and prove why
> > it's ok to ditch tnum.
>
> Semi-related proof[1] from awhile back :)
>
> > Generally speaking, I think tnum is useful in only one use case:
> > checking (un)aligned memory accesses. This is the only representation
> > that can make sure we have lower 2-3 bits as zero to prove that memory
> > access is 4- or 8-byte aligned.
> >
> > Other than this, I think ranges are more precise and easier to work with.
>
> Agree with the above.
>
> I'd vote for ditching tnum for the retval check here. With umin/umax
> check in place there really isn't a need for an additional tnum check at
> all. Keeping it probably does more harm (in the form of confusion) than
> good.
Ok, I think it's as trivial as removing two lines of code. So I'll add
it as the last patch to the series and will let BPF maintainers decide
if they want this change or not.
>
> 1: https://lore.kernel.org/bpf/20220831031907.16133-3-shung-hsi.yu@suse.com/
>
> > But I'm not ready to go on the quest to eliminate tnum usage everywhere :)
> >
> > > > +
> > > > + return range.minval <= reg->umin_value && reg->umax_value <= range.maxval;
> > > > +}
> > > > +
> > >
> > > [...]
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH bpf-next 05/10] selftests/bpf: add selftest validating callback result is enforced
2023-11-22 1:16 [PATCH bpf-next 00/10] BPF verifier retval logic fixes Andrii Nakryiko
` (3 preceding siblings ...)
2023-11-22 1:16 ` [PATCH bpf-next 04/10] bpf: enforce exact retval range on subprog/callback exit Andrii Nakryiko
@ 2023-11-22 1:16 ` Andrii Nakryiko
2023-11-22 15:13 ` Eduard Zingerman
2023-11-22 1:16 ` [PATCH bpf-next 06/10] bpf: enforce precise retval range on program exit Andrii Nakryiko
` (4 subsequent siblings)
9 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-22 1:16 UTC (permalink / raw)
To: bpf, ast, daniel, martin.lau; +Cc: andrii, kernel-team
BPF verifier expects callback subprogs to return values from specified
range (typically [0, 1]). This requires that r0 at exit is both precise
(because we rely on specific value range) and is marked as read
(otherwise state comparison will ignore such register as unimportant).
Add a simple test that validates that all these conditions are enforced.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
.../bpf/progs/verifier_subprog_precision.c | 45 +++++++++++++++++++
1 file changed, 45 insertions(+)
diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
index db6b3143338b..65c49e56797a 100644
--- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
@@ -117,6 +117,51 @@ __naked int global_subprog_result_precise(void)
);
}
+__naked __noinline __used
+static unsigned long loop_callback_bad()
+{
+ /* bpf_loop() callback that can return values outside of [0, 1] range */
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 > 1000 goto 1f;"
+ "r0 = 0;"
+ "1:"
+ "goto +0;" /* checkpoint */
+ /* bpf_loop() expects [0, 1] values, so branch above skipping
+ * r0 = 0; should lead to a failure, but if exit instruction
+ * doesn't enforce r0's precision, this callback will be
+ * successfully verified
+ */
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_common
+ );
+}
+
+SEC("?raw_tp")
+__failure __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__msg("from 10 to 12: frame1: R0=scalar(umin=1001) R10=fp0 cb")
+__msg("At callback return the register R0 has unknown scalar value should have been in (0x0; 0x1)")
+__naked int callback_precise_return_fail(void)
+{
+ asm volatile (
+ "r1 = 1;" /* nr_loops */
+ "r2 = %[loop_callback_bad];" /* callback_fn */
+ "r3 = 0;" /* callback_ctx */
+ "r4 = 0;" /* flags */
+ "call %[bpf_loop];"
+
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_ptr(loop_callback_bad),
+ __imm(bpf_loop)
+ : __clobber_common
+ );
+}
+
SEC("?raw_tp")
__success __log_level(2)
__msg("14: (0f) r1 += r6")
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread* Re: [PATCH bpf-next 05/10] selftests/bpf: add selftest validating callback result is enforced
2023-11-22 1:16 ` [PATCH bpf-next 05/10] selftests/bpf: add selftest validating callback result is enforced Andrii Nakryiko
@ 2023-11-22 15:13 ` Eduard Zingerman
2023-11-22 17:46 ` Andrii Nakryiko
0 siblings, 1 reply; 27+ messages in thread
From: Eduard Zingerman @ 2023-11-22 15:13 UTC (permalink / raw)
To: Andrii Nakryiko, bpf, ast, daniel, martin.lau; +Cc: kernel-team
On Tue, 2023-11-21 at 17:16 -0800, Andrii Nakryiko wrote:
> BPF verifier expects callback subprogs to return values from specified
> range (typically [0, 1]). This requires that r0 at exit is both precise
> (because we rely on specific value range) and is marked as read
> (otherwise state comparison will ignore such register as unimportant).
>
> Add a simple test that validates that all these conditions are enforced.
>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
[...]
> +SEC("?raw_tp")
> +__failure __log_level(2)
> +__flag(BPF_F_TEST_STATE_FREQ)
Nit: although it is redundant, maybe also check the precision log to
verify that r0 is indeed marked precise?
> +__msg("from 10 to 12: frame1: R0=scalar(umin=1001) R10=fp0 cb")
> +__msg("At callback return the register R0 has unknown scalar value should have been in (0x0; 0x1)")
> +__naked int callback_precise_return_fail(void)
[...]
^ permalink raw reply [flat|nested] 27+ messages in thread* Re: [PATCH bpf-next 05/10] selftests/bpf: add selftest validating callback result is enforced
2023-11-22 15:13 ` Eduard Zingerman
@ 2023-11-22 17:46 ` Andrii Nakryiko
0 siblings, 0 replies; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-22 17:46 UTC (permalink / raw)
To: Eduard Zingerman
Cc: Andrii Nakryiko, bpf, ast, daniel, martin.lau, kernel-team
On Wed, Nov 22, 2023 at 7:13 AM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Tue, 2023-11-21 at 17:16 -0800, Andrii Nakryiko wrote:
> > BPF verifier expects callback subprogs to return values from specified
> > range (typically [0, 1]). This requires that r0 at exit is both precise
> > (because we rely on specific value range) and is marked as read
> > (otherwise state comparison will ignore such register as unimportant).
> >
> > Add a simple test that validates that all these conditions are enforced.
> >
> > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > ---
>
> Acked-by: Eduard Zingerman <eddyz87@gmail.com>
>
> [...]
>
> > +SEC("?raw_tp")
> > +__failure __log_level(2)
> > +__flag(BPF_F_TEST_STATE_FREQ)
>
> Nit: although it is redundant, but maybe also check precision log to
> check that r0 is indeed marked precise?
>
sure, I'll add another expected msg, no problem
> > +__msg("from 10 to 12: frame1: R0=scalar(umin=1001) R10=fp0 cb")
> > +__msg("At callback return the register R0 has unknown scalar value should have been in (0x0; 0x1)")
> > +__naked int callback_precise_return_fail(void)
>
> [...]
>
>
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH bpf-next 06/10] bpf: enforce precise retval range on program exit
2023-11-22 1:16 [PATCH bpf-next 00/10] BPF verifier retval logic fixes Andrii Nakryiko
` (4 preceding siblings ...)
2023-11-22 1:16 ` [PATCH bpf-next 05/10] selftests/bpf: add selftest validating callback result is enforced Andrii Nakryiko
@ 2023-11-22 1:16 ` Andrii Nakryiko
2023-11-22 15:13 ` Eduard Zingerman
2023-11-22 1:16 ` [PATCH bpf-next 07/10] bpf: unify async callback and program retval checks Andrii Nakryiko
` (3 subsequent siblings)
9 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-22 1:16 UTC (permalink / raw)
To: bpf, ast, daniel, martin.lau; +Cc: andrii, kernel-team
Similarly to subprog/callback logic, enforce return value of BPF program
using more precise umin/umax range, in addition to tnum-based check.
We need to adjust a bunch of tests due to a changed format of an error
message.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
kernel/bpf/verifier.c | 55 +++++++++++--------
.../selftests/bpf/progs/exceptions_assert.c | 2 +-
.../selftests/bpf/progs/exceptions_fail.c | 2 +-
.../selftests/bpf/progs/test_global_func15.c | 2 +-
.../selftests/bpf/progs/timer_failure.c | 2 +-
.../selftests/bpf/progs/user_ringbuf_fail.c | 2 +-
.../bpf/progs/verifier_cgroup_inv_retcode.c | 8 +--
.../bpf/progs/verifier_netfilter_retcode.c | 2 +-
.../bpf/progs/verifier_subprog_precision.c | 2 +-
9 files changed, 43 insertions(+), 34 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index fc103fd03896..f2bf7593289b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -352,20 +352,29 @@ __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
static void verbose_invalid_scalar(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
- struct tnum *range, const char *ctx,
+ struct bpf_retval_range range, const char *ctx,
const char *reg_name)
{
char tn_buf[48];
+ bool unknown = true;
- verbose(env, "At %s the register %s ", ctx, reg_name);
+ verbose(env, "At %s the register %s has", ctx, reg_name);
+ if (reg->umin_value > 0) {
+ verbose(env, " umin=%llu", reg->umin_value);
+ unknown = false;
+ }
+ if (reg->umax_value < U64_MAX) {
+ verbose(env, " umax=%llu", reg->umax_value);
+ unknown = false;
+ }
if (!tnum_is_unknown(reg->var_off)) {
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "has value %s", tn_buf);
- } else {
- verbose(env, "has unknown scalar value");
+ verbose(env, " var_off=%s", tn_buf);
+ unknown = false;
}
- tnum_strn(tn_buf, sizeof(tn_buf), *range);
- verbose(env, " should have been in %s\n", tn_buf);
+ if (unknown)
+ verbose(env, " unknown scalar value");
+ verbose(env, " should have been in [%u, %u]\n", range.minval, range.maxval);
}
static bool type_may_be_null(u32 type)
@@ -9522,9 +9531,8 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
/* enforce R0 return value range */
if (!retval_range_within(callee->callback_ret_range, r0)) {
- struct tnum range = retval_range_as_tnum(callee->callback_ret_range);
-
- verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
+ verbose_invalid_scalar(env, r0, callee->callback_ret_range,
+ "callback return", "R0");
return -EINVAL;
}
} else {
@@ -14853,7 +14861,8 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
struct tnum enforce_attach_type_range = tnum_unknown;
const struct bpf_prog *prog = env->prog;
struct bpf_reg_state *reg;
- struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0);
+ struct bpf_retval_range range = retval_range(0, 1);
+ struct bpf_retval_range const_0 = retval_range(0, 0);
enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
int err;
struct bpf_func_state *frame = env->cur_state->frame[0];
@@ -14901,8 +14910,8 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
return -EINVAL;
}
- if (!tnum_in(const_0, reg->var_off)) {
- verbose_invalid_scalar(env, reg, &const_0, "async callback", reg_name);
+ if (!retval_range_within(const_0, reg)) {
+ verbose_invalid_scalar(env, reg, const_0, "async callback", reg_name);
return -EINVAL;
}
return 0;
@@ -14928,14 +14937,14 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME ||
env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETSOCKNAME)
- range = tnum_range(1, 1);
+ range = retval_range(1, 1);
if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
- range = tnum_range(0, 3);
+ range = retval_range(0, 3);
break;
case BPF_PROG_TYPE_CGROUP_SKB:
if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
- range = tnum_range(0, 3);
+ range = retval_range(0, 3);
enforce_attach_type_range = tnum_range(2, 3);
}
break;
@@ -14948,13 +14957,13 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
case BPF_PROG_TYPE_RAW_TRACEPOINT:
if (!env->prog->aux->attach_btf_id)
return 0;
- range = tnum_const(0);
+ range = retval_range(0, 0);
break;
case BPF_PROG_TYPE_TRACING:
switch (env->prog->expected_attach_type) {
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
- range = tnum_const(0);
+ range = retval_range(0, 0);
break;
case BPF_TRACE_RAW_TP:
case BPF_MODIFY_RETURN:
@@ -14966,7 +14975,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
}
break;
case BPF_PROG_TYPE_SK_LOOKUP:
- range = tnum_range(SK_DROP, SK_PASS);
+ range = retval_range(SK_DROP, SK_PASS);
break;
case BPF_PROG_TYPE_LSM:
@@ -14980,12 +14989,12 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
/* Make sure programs that attach to void
* hooks don't try to modify return value.
*/
- range = tnum_range(1, 1);
+ range = retval_range(1, 1);
}
break;
case BPF_PROG_TYPE_NETFILTER:
- range = tnum_range(NF_DROP, NF_ACCEPT);
+ range = retval_range(NF_DROP, NF_ACCEPT);
break;
case BPF_PROG_TYPE_EXT:
/* freplace program can return anything as its return value
@@ -15001,8 +15010,8 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
return -EINVAL;
}
- if (!tnum_in(range, reg->var_off)) {
- verbose_invalid_scalar(env, reg, &range, "program exit", reg_name);
+ if (!retval_range_within(range, reg)) {
+ verbose_invalid_scalar(env, reg, range, "program exit", reg_name);
if (prog->expected_attach_type == BPF_LSM_CGROUP &&
prog_type == BPF_PROG_TYPE_LSM &&
!prog->aux->attach_func_proto->type)
diff --git a/tools/testing/selftests/bpf/progs/exceptions_assert.c b/tools/testing/selftests/bpf/progs/exceptions_assert.c
index 575e7dd719c4..49793c300951 100644
--- a/tools/testing/selftests/bpf/progs/exceptions_assert.c
+++ b/tools/testing/selftests/bpf/progs/exceptions_assert.c
@@ -125,7 +125,7 @@ int check_assert_generic(struct __sk_buff *ctx)
}
SEC("?fentry/bpf_check")
-__failure __msg("At program exit the register R1 has value (0x40; 0x0)")
+__failure __msg("At program exit the register R1 has umin=64 umax=64 var_off=(0x40; 0x0)")
int check_assert_with_return(void *ctx)
{
bpf_assert_with(!ctx, 64);
diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c
index 69a8fb53ea1d..8041d1309b70 100644
--- a/tools/testing/selftests/bpf/progs/exceptions_fail.c
+++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c
@@ -306,7 +306,7 @@ int reject_set_exception_cb_bad_ret1(void *ctx)
}
SEC("?fentry/bpf_check")
-__failure __msg("At program exit the register R1 has value (0x40; 0x0) should")
+__failure __msg("At program exit the register R1 has umin=64 umax=64 var_off=(0x40; 0x0) should")
int reject_set_exception_cb_bad_ret2(void *ctx)
{
bpf_throw(64);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func15.c b/tools/testing/selftests/bpf/progs/test_global_func15.c
index b512d6a6c75e..f80207480e8a 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func15.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func15.c
@@ -13,7 +13,7 @@ __noinline int foo(unsigned int *v)
}
SEC("cgroup_skb/ingress")
-__failure __msg("At program exit the register R0 has value")
+__failure __msg("At program exit the register R0 has ")
int global_func15(struct __sk_buff *skb)
{
unsigned int v = 1;
diff --git a/tools/testing/selftests/bpf/progs/timer_failure.c b/tools/testing/selftests/bpf/progs/timer_failure.c
index 226d33b5a05c..9000da1e2120 100644
--- a/tools/testing/selftests/bpf/progs/timer_failure.c
+++ b/tools/testing/selftests/bpf/progs/timer_failure.c
@@ -30,7 +30,7 @@ static int timer_cb_ret1(void *map, int *key, struct bpf_timer *timer)
}
SEC("fentry/bpf_fentry_test1")
-__failure __msg("should have been in (0x0; 0x0)")
+__failure __msg("should have been in [0, 0]")
int BPF_PROG2(test_ret_1, int, a)
{
int key = 0;
diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
index 03ee946c6bf7..11ab25c42c36 100644
--- a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
+++ b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
@@ -184,7 +184,7 @@ invalid_drain_callback_return(struct bpf_dynptr *dynptr, void *context)
* not be able to write to that pointer.
*/
SEC("?raw_tp")
-__failure __msg("At callback return the register R0 has value")
+__failure __msg("At callback return the register R0 has ")
int user_ringbuf_callback_invalid_return(void *ctx)
{
bpf_user_ringbuf_drain(&user_ringbuf, invalid_drain_callback_return, NULL, 0);
diff --git a/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c b/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c
index d6c4a7f3f790..b1e61b16aaf3 100644
--- a/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c
+++ b/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c
@@ -7,7 +7,7 @@
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test1")
-__failure __msg("R0 has value (0x0; 0xffffffff)")
+__failure __msg("var_off=(0x0; 0xffffffff) should have been in [0, 1]")
__naked void with_invalid_return_code_test1(void)
{
asm volatile (" \
@@ -30,7 +30,7 @@ __naked void with_invalid_return_code_test2(void)
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test3")
-__failure __msg("R0 has value (0x0; 0x3)")
+__failure __msg("var_off=(0x0; 0x3) should have been in [0, 1]")
__naked void with_invalid_return_code_test3(void)
{
asm volatile (" \
@@ -53,7 +53,7 @@ __naked void with_invalid_return_code_test4(void)
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test5")
-__failure __msg("R0 has value (0x2; 0x0)")
+__failure __msg("var_off=(0x2; 0x0) should have been in [0, 1]")
__naked void with_invalid_return_code_test5(void)
{
asm volatile (" \
@@ -75,7 +75,7 @@ __naked void with_invalid_return_code_test6(void)
SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test7")
-__failure __msg("R0 has unknown scalar value")
+__failure __msg("R0 has unknown scalar value should have been in [0, 1]")
__naked void with_invalid_return_code_test7(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c
index 353ae6da00e1..48fd29a081e1 100644
--- a/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c
+++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c
@@ -39,7 +39,7 @@ __naked void with_valid_return_code_test3(void)
SEC("netfilter")
__description("bpf_exit with invalid return code. test4")
-__failure __msg("R0 has value (0x2; 0x0)")
+__failure __msg("R0 has umin=2 umax=2 var_off=(0x2; 0x0) should have been in [0, 1]")
__naked void with_invalid_return_code_test4(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
index 65c49e56797a..2c4c6f7c1517 100644
--- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
@@ -143,7 +143,7 @@ SEC("?raw_tp")
__failure __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
__msg("from 10 to 12: frame1: R0=scalar(umin=1001) R10=fp0 cb")
-__msg("At callback return the register R0 has unknown scalar value should have been in (0x0; 0x1)")
+__msg("At callback return the register R0 has umin=1001 should have been in [0, 1]")
__naked int callback_precise_return_fail(void)
{
asm volatile (
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread* [PATCH bpf-next 07/10] bpf: unify async callback and program retval checks
2023-11-22 1:16 [PATCH bpf-next 00/10] BPF verifier retval logic fixes Andrii Nakryiko
` (5 preceding siblings ...)
2023-11-22 1:16 ` [PATCH bpf-next 06/10] bpf: enforce precise retval range on program exit Andrii Nakryiko
@ 2023-11-22 1:16 ` Andrii Nakryiko
2023-11-22 15:13 ` Eduard Zingerman
2023-11-22 1:16 ` [PATCH bpf-next 08/10] bpf: enforce precision of R0 on program/async callback return Andrii Nakryiko
` (2 subsequent siblings)
9 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-22 1:16 UTC (permalink / raw)
To: bpf, ast, daniel, martin.lau; +Cc: andrii, kernel-team
Use common logic to verify program return values and async callback
return values. This allows to avoid duplication of any extra steps
necessary, like precision marking, which will be added in the next
patch.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
kernel/bpf/verifier.c | 30 ++++++++++++------------------
1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f2bf7593289b..87d720d44e0c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -358,7 +358,7 @@ static void verbose_invalid_scalar(struct bpf_verifier_env *env,
char tn_buf[48];
bool unknown = true;
- verbose(env, "At %s the register %s has", ctx, reg_name);
+ verbose(env, "%s the register %s has", ctx, reg_name);
if (reg->umin_value > 0) {
verbose(env, " umin=%llu", reg->umin_value);
unknown = false;
@@ -9532,7 +9532,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
/* enforce R0 return value range */
if (!retval_range_within(callee->callback_ret_range, r0)) {
verbose_invalid_scalar(env, r0, callee->callback_ret_range,
- "callback return", "R0");
+ "At callback return", "R0");
return -EINVAL;
}
} else {
@@ -14858,11 +14858,11 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name)
{
+ const char *exit_ctx = "At program exit";
struct tnum enforce_attach_type_range = tnum_unknown;
const struct bpf_prog *prog = env->prog;
struct bpf_reg_state *reg;
struct bpf_retval_range range = retval_range(0, 1);
- struct bpf_retval_range const_0 = retval_range(0, 0);
enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
int err;
struct bpf_func_state *frame = env->cur_state->frame[0];
@@ -14904,17 +14904,9 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
if (frame->in_async_callback_fn) {
/* enforce return zero from async callbacks like timer */
- if (reg->type != SCALAR_VALUE) {
- verbose(env, "In async callback the register R%d is not a known value (%s)\n",
- regno, reg_type_str(env, reg->type));
- return -EINVAL;
- }
-
- if (!retval_range_within(const_0, reg)) {
- verbose_invalid_scalar(env, reg, const_0, "async callback", reg_name);
- return -EINVAL;
- }
- return 0;
+ exit_ctx = "At async callback return";
+ range = retval_range(0, 0);
+ goto enforce_retval;
}
if (is_subprog && !frame->in_exception_callback_fn) {
@@ -15004,15 +14996,17 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
return 0;
}
+enforce_retval:
if (reg->type != SCALAR_VALUE) {
- verbose(env, "At program exit the register R%d is not a known value (%s)\n",
- regno, reg_type_str(env, reg->type));
+ verbose(env, "%s the register R%d is not a known value (%s)\n",
+ exit_ctx, regno, reg_type_str(env, reg->type));
return -EINVAL;
}
if (!retval_range_within(range, reg)) {
- verbose_invalid_scalar(env, reg, range, "program exit", reg_name);
- if (prog->expected_attach_type == BPF_LSM_CGROUP &&
+ verbose_invalid_scalar(env, reg, range, exit_ctx, reg_name);
+ if (!is_subprog &&
+ prog->expected_attach_type == BPF_LSM_CGROUP &&
prog_type == BPF_PROG_TYPE_LSM &&
!prog->aux->attach_func_proto->type)
verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread* [PATCH bpf-next 08/10] bpf: enforce precision of R0 on program/async callback return
2023-11-22 1:16 [PATCH bpf-next 00/10] BPF verifier retval logic fixes Andrii Nakryiko
` (6 preceding siblings ...)
2023-11-22 1:16 ` [PATCH bpf-next 07/10] bpf: unify async callback and program retval checks Andrii Nakryiko
@ 2023-11-22 1:16 ` Andrii Nakryiko
2023-11-22 15:13 ` Eduard Zingerman
2023-11-22 1:16 ` [PATCH bpf-next 09/10] selftests/bpf: validate async callback return value check correctness Andrii Nakryiko
2023-11-22 1:16 ` [PATCH bpf-next 10/10] selftests/bpf: adjust global_func15 test to validate prog exit precision Andrii Nakryiko
9 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-22 1:16 UTC (permalink / raw)
To: bpf, ast, daniel, martin.lau; +Cc: andrii, kernel-team
Given we enforce a valid range for program and async callback return
value, we must mark R0 as precise to avoid incorrect state pruning.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
kernel/bpf/verifier.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 87d720d44e0c..1a0a545aec9c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -15003,6 +15003,10 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
return -EINVAL;
}
+ err = mark_chain_precision(env, regno);
+ if (err)
+ return err;
+
if (!retval_range_within(range, reg)) {
verbose_invalid_scalar(env, reg, range, exit_ctx, reg_name);
if (!is_subprog &&
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread* [PATCH bpf-next 09/10] selftests/bpf: validate async callback return value check correctness
2023-11-22 1:16 [PATCH bpf-next 00/10] BPF verifier retval logic fixes Andrii Nakryiko
` (7 preceding siblings ...)
2023-11-22 1:16 ` [PATCH bpf-next 08/10] bpf: enforce precision of R0 on program/async callback return Andrii Nakryiko
@ 2023-11-22 1:16 ` Andrii Nakryiko
2023-11-22 15:13 ` Eduard Zingerman
2023-11-22 1:16 ` [PATCH bpf-next 10/10] selftests/bpf: adjust global_func15 test to validate prog exit precision Andrii Nakryiko
9 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-22 1:16 UTC (permalink / raw)
To: bpf, ast, daniel, martin.lau; +Cc: andrii, kernel-team
Adjust timer/timer_ret_1 test to validate more carefully verifier logic
of enforcing async callback return value. This test will pass only if
return result is marked precise and read.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
.../selftests/bpf/progs/timer_failure.c | 29 ++++++++++++++-----
1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/timer_failure.c b/tools/testing/selftests/bpf/progs/timer_failure.c
index 9000da1e2120..a8dab6697810 100644
--- a/tools/testing/selftests/bpf/progs/timer_failure.c
+++ b/tools/testing/selftests/bpf/progs/timer_failure.c
@@ -21,17 +21,32 @@ struct {
__type(value, struct elem);
} timer_map SEC(".maps");
-static int timer_cb_ret1(void *map, int *key, struct bpf_timer *timer)
+__naked __noinline __used
+static unsigned long timer_cb_ret_bad()
{
- if (bpf_get_smp_processor_id() % 2)
- return 1;
- else
- return 0;
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 > 1000 goto 1f;"
+ "r0 = 0;"
+ "1:"
+ "goto +0;" /* checkpoint */
+ /* async callback is expected to return 0, so branch above
+ * skipping r0 = 0; should lead to a failure, but if exit
+ * instruction doesn't enforce r0's precision, this callback
+ * will be successfully verified
+ */
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_common
+ );
}
SEC("fentry/bpf_fentry_test1")
__failure __msg("should have been in [0, 0]")
-int BPF_PROG2(test_ret_1, int, a)
+__log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+int BPF_PROG2(test_bad_ret, int, a)
{
int key = 0;
struct bpf_timer *timer;
@@ -39,7 +54,7 @@ int BPF_PROG2(test_ret_1, int, a)
timer = bpf_map_lookup_elem(&timer_map, &key);
if (timer) {
bpf_timer_init(timer, &timer_map, CLOCK_BOOTTIME);
- bpf_timer_set_callback(timer, timer_cb_ret1);
+ bpf_timer_set_callback(timer, timer_cb_ret_bad);
bpf_timer_start(timer, 1000, 0);
}
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread* [PATCH bpf-next 10/10] selftests/bpf: adjust global_func15 test to validate prog exit precision
2023-11-22 1:16 [PATCH bpf-next 00/10] BPF verifier retval logic fixes Andrii Nakryiko
` (8 preceding siblings ...)
2023-11-22 1:16 ` [PATCH bpf-next 09/10] selftests/bpf: validate async callback return value check correctness Andrii Nakryiko
@ 2023-11-22 1:16 ` Andrii Nakryiko
2023-11-22 15:13 ` Eduard Zingerman
9 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2023-11-22 1:16 UTC (permalink / raw)
To: bpf, ast, daniel, martin.lau; +Cc: andrii, kernel-team
Add one more subtest to global_func15 selftest to validate that
verifier properly marks r0 as precise and avoids erroneous state pruning
of the branch that has return value outside of expected [0, 1] value.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
.../selftests/bpf/progs/test_global_func15.c | 29 +++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/tools/testing/selftests/bpf/progs/test_global_func15.c b/tools/testing/selftests/bpf/progs/test_global_func15.c
index f80207480e8a..fc24d1e1f287 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func15.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func15.c
@@ -22,3 +22,32 @@ int global_func15(struct __sk_buff *skb)
return v;
}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("At program exit the register R0 has ")
+__log_level(2) __flag(BPF_F_TEST_STATE_FREQ)
+__naked int global_func15_tricky_pruning(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "*(u64 *)(r10 -8) = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 > 1000 goto 1f;"
+ "r0 = 1;"
+ "1:"
+ "goto +0;" /* checkpoint */
+ /* cgroup_skb/ingress program is expected to return [0, 1]
+ * values, so branch above makes sure that in a fallthrough
+ * case we have a valid 1 stored in R0 register, but in
+ * a branch case we assign some random value to R0. So if
+ * there is something wrong with precision tracking for R0 at
+ * program exit, we might erroneously prune branch case,
+ * because R0 in fallthrough case is imprecise (and thus any
+ * value is valid from POV of verifier is_state_equal() logic)
+ */
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_common
+ );
+}
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread