* [PATCH bpf-next 0/4] bpf: Track delta between "linked" registers.
@ 2024-06-08 0:44 Alexei Starovoitov
2024-06-08 0:44 ` [PATCH bpf-next 1/4] bpf: Relax tuple len requirement for sk helpers Alexei Starovoitov
` (3 more replies)
0 siblings, 4 replies; 12+ messages in thread
From: Alexei Starovoitov @ 2024-06-08 0:44 UTC (permalink / raw)
To: bpf; +Cc: daniel, andrii, martin.lau, memxor, eddyz87, kernel-team
From: Alexei Starovoitov <ast@kernel.org>
Compilers can generate the code
r1 = r2
r1 += 0x1
if r2 < 1000 goto ...
use knowledge of r2 range in subsequent r1 operations
The "undo" pass was introduced in LLVM
https://reviews.llvm.org/D121937
to prevent this optimization, but it cannot cover all cases.
Instead of fighting the middle-end optimizer in the BPF backend, teach the
verifier about this pattern.
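For illustration, a loop of roughly this shape (hypothetical C; 'arr' and
the bound are placeholders, not taken from this series) is enough for LLVM
to produce the pattern, since the rotated loop increments a copy of the
induction variable before branching on the original:

	for (i = 0; i < 1000 && can_loop; i++)
		arr[i] = i;

	/* conceptual lowering: r2 holds i; "r1 = r2; r1 += 1" computes the
	 * next i, then "if r2 < 1000" checks the current one. After the
	 * branch only r2 has a known range, yet arr is indexed via r1.
	 */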
The veristat difference:
File                               Program            Insns (A) Insns (B) Insns (DIFF)
---------------------------------- ------------------ --------- --------- ----------------
arena_htab.bpf.o                   arena_htab_llvm        18656       768 -17888 (-95.88%)
arena_htab_asm.bpf.o               arena_htab_asm         18523       586 -17937 (-96.84%)
iters.bpf.o                        iter_subprog_iters      1109       981   -128 (-11.54%)
verifier_iterating_callbacks.bpf.o cond_break1              110       121    +11 (+10.00%)
verifier_iterating_callbacks.bpf.o cond_break2              113        91    -22 (-19.47%)
Alexei Starovoitov (4):
bpf: Relax tuple len requirement for sk helpers.
bpf: Track delta between "linked" registers.
bpf: Support can_loop/cond_break on big endian
selftests/bpf: Add tests for add_const
include/linux/bpf_verifier.h | 12 +-
kernel/bpf/log.c | 4 +-
kernel/bpf/verifier.c | 90 +++++++++--
net/core/filter.c | 24 +--
.../testing/selftests/bpf/bpf_experimental.h | 28 ++++
.../testing/selftests/bpf/progs/arena_htab.c | 16 +-
.../bpf/progs/verifier_iterating_callbacks.c | 150 ++++++++++++++++++
7 files changed, 298 insertions(+), 26 deletions(-)
--
2.43.0
* [PATCH bpf-next 1/4] bpf: Relax tuple len requirement for sk helpers.
2024-06-08 0:44 [PATCH bpf-next 0/4] bpf: Track delta between "linked" registers Alexei Starovoitov
@ 2024-06-08 0:44 ` Alexei Starovoitov
2024-06-08 0:44 ` [PATCH bpf-next 2/4] bpf: Track delta between "linked" registers Alexei Starovoitov
` (2 subsequent siblings)
3 siblings, 0 replies; 12+ messages in thread
From: Alexei Starovoitov @ 2024-06-08 0:44 UTC (permalink / raw)
To: bpf; +Cc: daniel, andrii, martin.lau, memxor, eddyz87, kernel-team
From: Alexei Starovoitov <ast@kernel.org>
__bpf_skc_lookup() safely handles incorrect values of tuple len,
hence we can allow zero to be passed as tuple len.
This patch alone doesn't make an observable verifier difference.
It's a trivial improvement that might simplify bpf programs.
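A minimal sketch of what this permits (hypothetical program; 'use_ipv4' is
a placeholder). With ARG_CONST_SIZE the len == 0 path below was rejected;
with ARG_CONST_SIZE_OR_ZERO it verifies, and the helper simply returns
NULL for a tuple len it does not recognize:

	struct bpf_sock_tuple tuple = {};
	__u32 len = use_ipv4 ? sizeof(tuple.ipv4) : 0;
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(skb, &tuple, len, BPF_F_CURRENT_NETNS, 0);
	if (sk)
		bpf_sk_release(sk);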
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
net/core/filter.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 7c46ecba3b01..cb133232a887 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6815,7 +6815,7 @@ static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6834,7 +6834,7 @@ static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6853,7 +6853,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6877,7 +6877,7 @@ static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6901,7 +6901,7 @@ static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6925,7 +6925,7 @@ static const struct bpf_func_proto bpf_tc_sk_lookup_udp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6963,7 +6963,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6987,7 +6987,7 @@ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -7011,7 +7011,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -7031,7 +7031,7 @@ static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -7050,7 +7050,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -7069,7 +7069,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
--
2.43.0
* [PATCH bpf-next 2/4] bpf: Track delta between "linked" registers.
2024-06-08 0:44 [PATCH bpf-next 0/4] bpf: Track delta between "linked" registers Alexei Starovoitov
2024-06-08 0:44 ` [PATCH bpf-next 1/4] bpf: Relax tuple len requirement for sk helpers Alexei Starovoitov
@ 2024-06-08 0:44 ` Alexei Starovoitov
2024-06-10 18:32 ` Eduard Zingerman
2024-06-08 0:44 ` [PATCH bpf-next 3/4] bpf: Support can_loop/cond_break on big endian Alexei Starovoitov
2024-06-08 0:44 ` [PATCH bpf-next 4/4] selftests/bpf: Add tests for add_const Alexei Starovoitov
3 siblings, 1 reply; 12+ messages in thread
From: Alexei Starovoitov @ 2024-06-08 0:44 UTC (permalink / raw)
To: bpf; +Cc: daniel, andrii, martin.lau, memxor, eddyz87, kernel-team
From: Alexei Starovoitov <ast@kernel.org>
Compilers can generate the code
r1 = r2
r1 += 0x1
if r2 < 1000 goto ...
use knowledge of r2 range in subsequent r1 operations
So remember constant delta between r2 and r1 and update r1 after 'if' condition.
Unfortunately LLVM still uses this pattern for loops with 'can_loop' construct:
for (i = 0; i < 1000 && can_loop; i++)
The "undo" pass was introduced in LLVM
https://reviews.llvm.org/D121937
to prevent this optimization, but it cannot cover all cases.
Instead of fighting the middle-end optimizer in the BPF backend, teach the
verifier about this pattern.
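A worked example of the intended tracking (my annotation of the scheme in
this patch, not actual verifier output):

	r1 = r2            ; r1.id == r2.id == N
	r1 += 1            ; r1.id == N | BPF_ADD_CONST, r1.off == 1
	if r2 < 1000 goto l0
	...                ; on the taken path an otherwise-unknown r2
	                   ; gets range [0, 999]; find_equal_scalars()
	                   ; copies that range into r1 and re-applies
	                   ; r1.off, so r1 gets [1, 1000] instead of
	                   ; staying unknown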
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
include/linux/bpf_verifier.h | 12 ++++-
kernel/bpf/log.c | 4 +-
kernel/bpf/verifier.c | 90 ++++++++++++++++++++++++++++++++----
3 files changed, 95 insertions(+), 11 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 50aa87f8d77f..2b54e25d2364 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -73,7 +73,10 @@ enum bpf_iter_state {
struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
- /* Fixed part of pointer offset, pointer types only */
+ /*
+ * Fixed part of pointer offset, pointer types only.
+ * Or constant delta between "linked" scalars with the same ID.
+ */
s32 off;
union {
/* valid when type == PTR_TO_PACKET */
@@ -167,6 +170,13 @@ struct bpf_reg_state {
* Similarly to dynptrs, we use ID to track "belonging" of a reference
* to a specific instance of bpf_iter.
*/
+ /*
+ * Upper bit of ID is used to remember relationship between "linked"
+ * registers. Example:
+ * r1 = r2; both will have r1->id == r2->id == N
+ * r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10
+ */
+#define BPF_ADD_CONST (1U << 31)
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
* from a pointer-cast helper, bpf_sk_fullsock() and
diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
index 4bd8f17a9f24..3f4ae92e549f 100644
--- a/kernel/bpf/log.c
+++ b/kernel/bpf/log.c
@@ -708,7 +708,9 @@ static void print_reg_state(struct bpf_verifier_env *env,
verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id));
verbose(env, "(");
if (reg->id)
- verbose_a("id=%d", reg->id);
+ verbose_a("id=%d", reg->id & ~BPF_ADD_CONST);
+ if (reg->id & BPF_ADD_CONST)
+ verbose(env, "%+d", reg->off);
if (reg->ref_obj_id)
verbose_a("ref_obj_id=%d", reg->ref_obj_id);
if (type_is_non_owning_ref(reg->type))
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 81a3d2ced78d..e282625995fc 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4438,8 +4438,20 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
struct bpf_reg_state *src_reg)
{
- if (src_reg->type == SCALAR_VALUE && !src_reg->id &&
- !tnum_is_const(src_reg->var_off))
+ if (src_reg->type != SCALAR_VALUE)
+ return;
+
+ if (src_reg->id & BPF_ADD_CONST) {
+ /*
+ * The verifier is processing rX = rY insn and
+ * rY->id has the linked register flag set already.
+ * Clear it, since multiple rX += const are not supported.
+ */
+ src_reg->id = 0;
+ src_reg->off = 0;
+ }
+
+ if (!src_reg->id && !tnum_is_const(src_reg->var_off))
/* Ensure that src_reg has a valid ID that will be copied to
* dst_reg and then will be used by find_equal_scalars() to
* propagate min/max range.
@@ -14026,6 +14038,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
+ bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
u8 opcode = BPF_OP(insn->code);
int err;
@@ -14048,11 +14061,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
if (dst_reg->type != SCALAR_VALUE)
ptr_reg = dst_reg;
- else
- /* Make sure ID is cleared otherwise dst_reg min/max could be
- * incorrectly propagated into other registers by find_equal_scalars()
- */
- dst_reg->id = 0;
+
if (BPF_SRC(insn->code) == BPF_X) {
src_reg = &regs[insn->src_reg];
if (src_reg->type != SCALAR_VALUE) {
@@ -14116,7 +14125,40 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
verbose(env, "verifier internal error: no src_reg\n");
return -EINVAL;
}
- return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
+ err = adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
+ if (err)
+ return err;
+ /*
+ * Compilers can generate the code
+ * r1 = r2
+ * r1 += 0x1
+ * if r2 < 1000 goto ...
+ * use r1 in memory access
+ * So remember constant delta between r2 and r1 and update r1 after
+ * 'if' condition.
+ */
+ if (BPF_OP(insn->code) == BPF_ADD && dst_reg->id && is_reg_const(src_reg, alu32)) {
+ u64 val = reg_const_value(src_reg, alu32);
+
+ if ((dst_reg->id & BPF_ADD_CONST) || val > (u32)S32_MAX) {
+ /*
+ * If the register already went through rX += val
+ * we cannot accumulate another val into rX->off.
+ */
+ dst_reg->off = 0;
+ dst_reg->id = 0;
+ } else {
+ dst_reg->id |= BPF_ADD_CONST;
+ dst_reg->off = val;
+ }
+ } else {
+ /*
+ * Make sure ID is cleared otherwise dst_reg min/max could be
+ * incorrectly propagated into other registers by find_equal_scalars()
+ */
+ dst_reg->id = 0;
+ }
+ return 0;
}
/* check validity of 32-bit and 64-bit arithmetic operations */
@@ -15088,13 +15130,43 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
static void find_equal_scalars(struct bpf_verifier_state *vstate,
struct bpf_reg_state *known_reg)
{
+ struct bpf_reg_state fake_reg;
struct bpf_func_state *state;
struct bpf_reg_state *reg;
bpf_for_each_reg_in_vstate(vstate, state, reg, ({
- if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
+ if (reg->type != SCALAR_VALUE || reg == known_reg)
+ continue;
+ if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST))
+ continue;
+ if ((reg->id & BPF_ADD_CONST) == (known_reg->id & BPF_ADD_CONST)) {
copy_register_state(reg, known_reg);
+ } else if ((reg->id & BPF_ADD_CONST) && reg->off) {
+ /* reg = known_reg; reg += const */
+ copy_register_state(reg, known_reg);
+
+ fake_reg.type = SCALAR_VALUE;
+ __mark_reg_known(&fake_reg, reg->off);
+ scalar32_min_max_add(reg, &fake_reg);
+ scalar_min_max_add(reg, &fake_reg);
+ reg->var_off = tnum_add(reg->var_off, fake_reg.var_off);
+ reg->off = 0;
+ reg->id &= ~BPF_ADD_CONST;
+ } else if ((known_reg->id & BPF_ADD_CONST) && known_reg->off) {
+ /* reg = known_reg; reg -= const' */
+ copy_register_state(reg, known_reg);
+
+ fake_reg.type = SCALAR_VALUE;
+ __mark_reg_known(&fake_reg, known_reg->off);
+ scalar32_min_max_sub(reg, &fake_reg);
+ scalar_min_max_sub(reg, &fake_reg);
+ reg->var_off = tnum_sub(reg->var_off, fake_reg.var_off);
+ }
}));
+ if (known_reg->id & BPF_ADD_CONST) {
+ known_reg->id = 0;
+ known_reg->off = 0;
+ }
}
static int check_cond_jmp_op(struct bpf_verifier_env *env,
--
2.43.0
* [PATCH bpf-next 3/4] bpf: Support can_loop/cond_break on big endian
2024-06-08 0:44 [PATCH bpf-next 0/4] bpf: Track delta between "linked" registers Alexei Starovoitov
2024-06-08 0:44 ` [PATCH bpf-next 1/4] bpf: Relax tuple len requirement for sk helpers Alexei Starovoitov
2024-06-08 0:44 ` [PATCH bpf-next 2/4] bpf: Track delta between "linked" registers Alexei Starovoitov
@ 2024-06-08 0:44 ` Alexei Starovoitov
2024-06-10 5:44 ` Yonghong Song
2024-06-08 0:44 ` [PATCH bpf-next 4/4] selftests/bpf: Add tests for add_const Alexei Starovoitov
3 siblings, 1 reply; 12+ messages in thread
From: Alexei Starovoitov @ 2024-06-08 0:44 UTC (permalink / raw)
To: bpf; +Cc: daniel, andrii, martin.lau, memxor, eddyz87, kernel-team
From: Alexei Starovoitov <ast@kernel.org>
Add big endian support for can_loop/cond_break macros.
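For reference, a decode sketch of the emitted bytes (my annotation, not
part of the patch). A BPF instruction is 8 bytes: code (1 byte, 0xe5 is
may_goto), regs (1 byte), off (s16), imm (s32). On a big-endian target
the 32-bit word below is emitted most significant byte first, so the
computed 16-bit offset lands in the two bytes of the off field and the
first half of imm is zeroed; the trailing .short completes imm:

	.byte 0xe5                   /* code: may_goto */
	.byte 0                      /* regs */
	.long (off & 0xffff) << 16   /* emits off_hi, off_lo, 0, 0 */
	.short 0                     /* last two bytes of imm */

where 'off' stands for the (%l[l_break] - 1b - 8) / 8 expression.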
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
.../testing/selftests/bpf/bpf_experimental.h | 28 +++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 3d9e4b8c6b81..82b73c37b50b 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -351,6 +351,7 @@ l_true: \
l_continue:; \
})
#else
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define can_loop \
({ __label__ l_break, l_continue; \
bool ret = true; \
@@ -376,6 +377,33 @@ l_true: \
l_break: break; \
l_continue:; \
})
+#else
+#define can_loop \
+ ({ __label__ l_break, l_continue; \
+ bool ret = true; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: ret = false; \
+ l_continue:; \
+ ret; \
+ })
+
+#define cond_break \
+ ({ __label__ l_break, l_continue; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: break; \
+ l_continue:; \
+ })
+#endif
#endif
#ifndef bpf_nop_mov
--
2.43.0
* [PATCH bpf-next 4/4] selftests/bpf: Add tests for add_const
2024-06-08 0:44 [PATCH bpf-next 0/4] bpf: Track delta between "linked" registers Alexei Starovoitov
` (2 preceding siblings ...)
2024-06-08 0:44 ` [PATCH bpf-next 3/4] bpf: Support can_loop/cond_break on big endian Alexei Starovoitov
@ 2024-06-08 0:44 ` Alexei Starovoitov
3 siblings, 0 replies; 12+ messages in thread
From: Alexei Starovoitov @ 2024-06-08 0:44 UTC (permalink / raw)
To: bpf; +Cc: daniel, andrii, martin.lau, memxor, eddyz87, kernel-team
From: Alexei Starovoitov <ast@kernel.org>
Improve arena-based tests and add several C and asm tests
with this specific pattern.
These tests would have failed without add_const verifier support.
Also add several loop_inside_iter*() tests that are not related to add_const,
but nice to have.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
.../testing/selftests/bpf/progs/arena_htab.c | 16 +-
.../bpf/progs/verifier_iterating_callbacks.c | 150 ++++++++++++++++++
2 files changed, 163 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c
index 1e6ac187a6a0..cd598348725e 100644
--- a/tools/testing/selftests/bpf/progs/arena_htab.c
+++ b/tools/testing/selftests/bpf/progs/arena_htab.c
@@ -18,25 +18,35 @@ void __arena *htab_for_user;
bool skip = false;
int zero = 0;
+char __arena arr1[100000];
+char arr2[1000];
SEC("syscall")
int arena_htab_llvm(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) || defined(BPF_ARENA_FORCE_ASM)
struct htab __arena *htab;
+ char __arena *arr = arr1;
__u64 i;
htab = bpf_alloc(sizeof(*htab));
cast_kern(htab);
htab_init(htab);
+ cast_kern(arr);
+
/* first run. No old elems in the table */
- for (i = zero; i < 1000; i++)
+ for (i = zero; i < 100000 && can_loop; i++) {
htab_update_elem(htab, i, i);
+ arr[i] = i;
+ }
- /* should replace all elems with new ones */
- for (i = zero; i < 1000; i++)
+ /* should replace some elems with new ones */
+ for (i = zero; i < 1000 && can_loop; i++) {
htab_update_elem(htab, i, i);
+ /* Access mem to make the verifier use bounded loop logic */
+ arr2[i] = i;
+ }
cast_user(htab);
htab_for_user = htab;
#else
diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
index bd676d7e615f..a87100bf3862 100644
--- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
+++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
@@ -405,4 +405,154 @@ int cond_break5(const void *ctx)
return cnt1 > 1 && cnt2 > 1 ? 1 : 0;
}
+#define ARR2_SZ 1000
+SEC(".data.arr2")
+char arr2[ARR2_SZ];
+
+SEC("socket")
+__success __flag(BPF_F_TEST_STATE_FREQ)
+int loop_inside_iter(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+ __u64 i = 0;
+
+ bpf_iter_num_new(&it, 0, ARR2_SZ);
+ while ((v = bpf_iter_num_next(&it))) {
+ if (i < ARR2_SZ)
+ sum += arr2[i++];
+ }
+ bpf_iter_num_destroy(&it);
+ return sum;
+}
+
+SEC("socket")
+__success __flag(BPF_F_TEST_STATE_FREQ)
+int loop_inside_iter_signed(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+ long i = 0;
+
+ bpf_iter_num_new(&it, 0, ARR2_SZ);
+ while ((v = bpf_iter_num_next(&it))) {
+ if (i < ARR2_SZ && i >= 0)
+ sum += arr2[i++];
+ }
+ bpf_iter_num_destroy(&it);
+ return sum;
+}
+
+volatile const int limit = ARR2_SZ;
+
+SEC("socket")
+__success __flag(BPF_F_TEST_STATE_FREQ)
+int loop_inside_iter_volatile_limit(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+ __u64 i = 0;
+
+ bpf_iter_num_new(&it, 0, ARR2_SZ);
+ while ((v = bpf_iter_num_next(&it))) {
+ if (i < limit)
+ sum += arr2[i++];
+ }
+ bpf_iter_num_destroy(&it);
+ return sum;
+}
+
+#define ARR_LONG_SZ 1000
+
+SEC(".data.arr_long")
+long arr_long[ARR_LONG_SZ];
+
+SEC("socket")
+__success
+int test1(const void *ctx)
+{
+ long i;
+
+ for (i = 0; i < ARR_LONG_SZ && can_loop; i++)
+ arr_long[i] = i;
+ return 0;
+}
+
+SEC("socket")
+__success
+int test2(const void *ctx)
+{
+ __u64 i;
+
+ for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
+ barrier_var(i);
+ arr_long[i] = i;
+ }
+ return 0;
+}
+
+SEC(".data.arr_foo")
+struct {
+ int a;
+ int b;
+} arr_foo[ARR_LONG_SZ];
+
+SEC("socket")
+__success
+int test3(const void *ctx)
+{
+ __u64 i;
+
+ for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
+ barrier_var(i);
+ arr_foo[i].a = i;
+ arr_foo[i].b = i;
+ }
+ return 0;
+}
+
+SEC("socket")
+__success
+int test4(const void *ctx)
+{
+ long i;
+
+ for (i = zero + ARR_LONG_SZ - 1; i < ARR_LONG_SZ && i >= 0 && can_loop; i--) {
+ barrier_var(i);
+ arr_foo[i].a = i;
+ arr_foo[i].b = i;
+ }
+ return 0;
+}
+
+char buf[10] SEC(".data.buf");
+
+SEC("socket")
+__description("check add const")
+__success
+__naked void check_add_const(void)
+{
+ /* typical LLVM generated loop with may_goto */
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 >= 10 goto l1_%=; \
+l0_%=: r1 = %[buf]; \
+ r1 += r0; \
+ r3 = *(u8 *)(r1 +0); \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 4; /* off of l1_%=: */ \
+ .long 0; /* imm */ \
+ r2 = r0; \
+ r2 += 1; \
+ if r2 <= 10 goto l0_%=; \
+ exit; \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
char _license[] SEC("license") = "GPL";
--
2.43.0
* Re: [PATCH bpf-next 3/4] bpf: Support can_loop/cond_break on big endian
2024-06-08 0:44 ` [PATCH bpf-next 3/4] bpf: Support can_loop/cond_break on big endian Alexei Starovoitov
@ 2024-06-10 5:44 ` Yonghong Song
0 siblings, 0 replies; 12+ messages in thread
From: Yonghong Song @ 2024-06-10 5:44 UTC (permalink / raw)
To: Alexei Starovoitov, bpf
Cc: daniel, andrii, martin.lau, memxor, eddyz87, kernel-team
On 6/7/24 5:44 PM, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
>
> Add big endian support for can_loop/cond_break macros.
>
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
* Re: [PATCH bpf-next 2/4] bpf: Track delta between "linked" registers.
2024-06-08 0:44 ` [PATCH bpf-next 2/4] bpf: Track delta between "linked" registers Alexei Starovoitov
@ 2024-06-10 18:32 ` Eduard Zingerman
2024-06-10 18:56 ` Eduard Zingerman
2024-06-10 20:27 ` Alexei Starovoitov
0 siblings, 2 replies; 12+ messages in thread
From: Eduard Zingerman @ 2024-06-10 18:32 UTC (permalink / raw)
To: Alexei Starovoitov, bpf; +Cc: daniel, andrii, martin.lau, memxor, kernel-team
On Fri, 2024-06-07 at 17:44 -0700, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
>
> Compilers can generate the code
> r1 = r2
> r1 += 0x1
> if r2 < 1000 goto ...
> use knowledge of r2 range in subsequent r1 operations
>
> So remember constant delta between r2 and r1 and update r1 after 'if' condition.
>
> Unfortunately LLVM still uses this pattern for loops with 'can_loop' construct:
> for (i = 0; i < 1000 && can_loop; i++)
>
> The "undo" pass was introduced in LLVM
> https://reviews.llvm.org/D121937
> to prevent this optimization, but it cannot cover all cases.
> Instead of fighting the middle-end optimizer in the BPF backend, teach the
> verifier about this pattern.
I like this idea.
In theory it could be generalized to handle situations when LLVM
uses two counters in parallel:
r0 = 0 // as an index
r1 = 0 // as a pointer
...
r0 += 1
r1 += 8
>
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> ---
[...]
> @@ -15088,13 +15130,43 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
> static void find_equal_scalars(struct bpf_verifier_state *vstate,
> struct bpf_reg_state *known_reg)
> {
> + struct bpf_reg_state fake_reg;
> struct bpf_func_state *state;
> struct bpf_reg_state *reg;
>
> bpf_for_each_reg_in_vstate(vstate, state, reg, ({
> - if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
> + if (reg->type != SCALAR_VALUE || reg == known_reg)
> + continue;
> + if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST))
> + continue;
> + if ((reg->id & BPF_ADD_CONST) == (known_reg->id & BPF_ADD_CONST)) {
> copy_register_state(reg, known_reg);
> + } else if ((reg->id & BPF_ADD_CONST) && reg->off) {
> + /* reg = known_reg; reg += const */
> + copy_register_state(reg, known_reg);
> +
> + fake_reg.type = SCALAR_VALUE;
> + __mark_reg_known(&fake_reg, reg->off);
> + scalar32_min_max_add(reg, &fake_reg);
> + scalar_min_max_add(reg, &fake_reg);
> + reg->var_off = tnum_add(reg->var_off, fake_reg.var_off);
> + reg->off = 0;
> + reg->id &= ~BPF_ADD_CONST;
> + } else if ((known_reg->id & BPF_ADD_CONST) && known_reg->off) {
> + /* reg = known_reg; reg -= const' */
> + copy_register_state(reg, known_reg);
> +
> + fake_reg.type = SCALAR_VALUE;
> + __mark_reg_known(&fake_reg, known_reg->off);
> + scalar32_min_max_sub(reg, &fake_reg);
> + scalar_min_max_sub(reg, &fake_reg);
> + reg->var_off = tnum_sub(reg->var_off, fake_reg.var_off);
> + }
I think that copy_register_state logic is off here,
the copy overwrites reg->off before it is used to update the value.
The following test is marked as safe for me, while it should not:
char buf[10] SEC(".data.buf");
SEC("socket")
__failure
__msg("*(u8 *)(r7 +0) = r0")
__msg("invalid access to map value, value_size=10 off=9 size=1")
__naked void check_add_const_3regs(void)
{
asm volatile (
"r6 = %[buf];"
"r7 = %[buf];"
"call %[bpf_ktime_get_ns];"
"r1 = r0;" /* link r0.id == r1.id == r2.id */
"r2 = r0;"
"r1 += 1;" /* r1 == r0+1 */
"r2 += 2;" /* r2 == r0+2 */
"if r0 > 8 goto 1f;" /* r0 range [0, 8] */
"r6 += r1;" /* r1 range [1, 9] */
"r7 += r2;" /* r2 range [2, 10] */
"*(u8 *)(r6 +0) = r0;" /* safe, within bounds */
"*(u8 *)(r7 +0) = r0;" /* unsafe, out of bounds */
"1: exit;"
:
: __imm(bpf_ktime_get_ns),
__imm_ptr(buf)
: __clobber_common);
}
The conditional r0 > 8 propagates same range for r{0,1,2}:
7: (07) r1 += 1 ; R1_w=scalar(id=1+1)
8: (07) r2 += 2 ; R2_w=scalar(id=1+2)
9: (25) if r0 > 0x8 goto pc+4 ; R0_w=scalar(id=1,smin=smin32=0,smax=umax=smax32=umax32=8,var_off=(0x0; 0xf))
10: (0f) r6 += r1
11: R1_w=scalar(id=1,smin=smin32=0,smax=umax=smax32=umax32=8,var_off=(0x0; 0xf)) R6_w=...
11: (0f) r7 += r2
12: R2_w=scalar(id=1,smin=smin32=0,smax=umax=smax32=umax32=8,var_off=(0x0; 0xf)) R7_w=...
> }));
> + if (known_reg->id & BPF_ADD_CONST) {
> + known_reg->id = 0;
> + known_reg->off = 0;
> + }
> }
>
> static int check_cond_jmp_op(struct bpf_verifier_env *env,
* Re: [PATCH bpf-next 2/4] bpf: Track delta between "linked" registers.
2024-06-10 18:32 ` Eduard Zingerman
@ 2024-06-10 18:56 ` Eduard Zingerman
2024-06-10 20:31 ` Alexei Starovoitov
2024-06-10 20:27 ` Alexei Starovoitov
1 sibling, 1 reply; 12+ messages in thread
From: Eduard Zingerman @ 2024-06-10 18:56 UTC (permalink / raw)
To: Alexei Starovoitov, bpf; +Cc: daniel, andrii, martin.lau, memxor, kernel-team
Also note that mark_precise_scalar_ids() needs to be updated
to use mask for ->id extraction.
(Although, that function is broken and I should spill out
v2 of the patch-set that removes it asap).
* Re: [PATCH bpf-next 2/4] bpf: Track delta between "linked" registers.
2024-06-10 18:32 ` Eduard Zingerman
2024-06-10 18:56 ` Eduard Zingerman
@ 2024-06-10 20:27 ` Alexei Starovoitov
2024-06-10 21:25 ` Alexei Starovoitov
1 sibling, 1 reply; 12+ messages in thread
From: Alexei Starovoitov @ 2024-06-10 20:27 UTC (permalink / raw)
To: Eduard Zingerman
Cc: bpf, Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau,
Kumar Kartikeya Dwivedi, Kernel Team
On Mon, Jun 10, 2024 at 11:32 AM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Fri, 2024-06-07 at 17:44 -0700, Alexei Starovoitov wrote:
> > From: Alexei Starovoitov <ast@kernel.org>
> >
> > Compilers can generate the code
> > r1 = r2
> > r1 += 0x1
> > if r2 < 1000 goto ...
> > use knowledge of r2 range in subsequent r1 operations
> >
> > So remember constant delta between r2 and r1 and update r1 after 'if' condition.
> >
> > Unfortunately LLVM still uses this pattern for loops with 'can_loop' construct:
> > for (i = 0; i < 1000 && can_loop; i++)
> >
> > The "undo" pass was introduced in LLVM
> > https://reviews.llvm.org/D121937
> > to prevent this optimization, but it cannot cover all cases.
> > Instead of fighting the middle-end optimizer in the BPF backend, teach the
> > verifier about this pattern.
>
> I like this idea.
> In theory it could be generalized to handle situations when LLVM
> uses two counters in parallel:
>
> r0 = 0 // as an index
> r1 = 0 // as a pointer
> ...
> r0 += 1
> r1 += 8
I don't see how the verifier can associate r0 and r1.
In this example r0 will be a scalar while
r1 = ld_imm64 map
One reg will be counting loops.
Another adding fixed offset to map value.
> >
> > Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> > ---
>
> [...]
>
> > @@ -15088,13 +15130,43 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
> > static void find_equal_scalars(struct bpf_verifier_state *vstate,
> > struct bpf_reg_state *known_reg)
> > {
> > + struct bpf_reg_state fake_reg;
> > struct bpf_func_state *state;
> > struct bpf_reg_state *reg;
> >
> > bpf_for_each_reg_in_vstate(vstate, state, reg, ({
> > - if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
> > + if (reg->type != SCALAR_VALUE || reg == known_reg)
> > + continue;
> > + if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST))
> > + continue;
> > + if ((reg->id & BPF_ADD_CONST) == (known_reg->id & BPF_ADD_CONST)) {
> > copy_register_state(reg, known_reg);
> > + } else if ((reg->id & BPF_ADD_CONST) && reg->off) {
> > + /* reg = known_reg; reg += const */
> > + copy_register_state(reg, known_reg);
> > +
> > + fake_reg.type = SCALAR_VALUE;
> > + __mark_reg_known(&fake_reg, reg->off);
> > + scalar32_min_max_add(reg, &fake_reg);
> > + scalar_min_max_add(reg, &fake_reg);
> > + reg->var_off = tnum_add(reg->var_off, fake_reg.var_off);
> > + reg->off = 0;
> > + reg->id &= ~BPF_ADD_CONST;
> > + } else if ((known_reg->id & BPF_ADD_CONST) && known_reg->off) {
> > + /* reg = known_reg; reg -= const' */
> > + copy_register_state(reg, known_reg);
> > +
> > + fake_reg.type = SCALAR_VALUE;
> > + __mark_reg_known(&fake_reg, known_reg->off);
> > + scalar32_min_max_sub(reg, &fake_reg);
> > + scalar_min_max_sub(reg, &fake_reg);
> > + reg->var_off = tnum_sub(reg->var_off, fake_reg.var_off);
> > + }
>
> I think that copy_register_state logic is off here,
> the copy overwrites reg->off before it is used to update the value.
Right. Last minute refactoring got bad :(
I had 'u32 off = reg->off' all along and then "refactored".
> The following test is marked as safe for me, while it should not:
Thanks for the test. Will incorporate.
* Re: [PATCH bpf-next 2/4] bpf: Track delta between "linked" registers.
2024-06-10 18:56 ` Eduard Zingerman
@ 2024-06-10 20:31 ` Alexei Starovoitov
2024-06-10 21:51 ` Eduard Zingerman
0 siblings, 1 reply; 12+ messages in thread
From: Alexei Starovoitov @ 2024-06-10 20:31 UTC (permalink / raw)
To: Eduard Zingerman
Cc: bpf, Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau,
Kumar Kartikeya Dwivedi, Kernel Team
On Mon, Jun 10, 2024 at 11:56 AM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> Also note that mark_precise_scalar_ids() needs to be updated
> to use mask for ->id extraction.
> (Although, that function is broken and I should spill out
> v2 of the patch-set that removes it asap).
Ahh. Right.
I've used
#define BPF_ADD_CONST (1U << 31)
instead of
u32 id:31;
u32 add_const:1;
to make sure that all ID comparisons in the rest of the verifier
are using both id and flag together and idmap stays as-is.
I missed mark_precise_scalar_ids() that needs to match
what find_equal_scalars() is doing.
What's broken in there?
* Re: [PATCH bpf-next 2/4] bpf: Track delta between "linked" registers.
2024-06-10 20:27 ` Alexei Starovoitov
@ 2024-06-10 21:25 ` Alexei Starovoitov
0 siblings, 0 replies; 12+ messages in thread
From: Alexei Starovoitov @ 2024-06-10 21:25 UTC (permalink / raw)
To: Eduard Zingerman
Cc: bpf, Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau,
Kumar Kartikeya Dwivedi, Kernel Team
On Mon, Jun 10, 2024 at 1:27 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Mon, Jun 10, 2024 at 11:32 AM Eduard Zingerman <eddyz87@gmail.com> wrote:
> >
> > On Fri, 2024-06-07 at 17:44 -0700, Alexei Starovoitov wrote:
> > > From: Alexei Starovoitov <ast@kernel.org>
> > >
> > > Compilers can generate the code
> > > r1 = r2
> > > r1 += 0x1
> > > if r2 < 1000 goto ...
> > > use knowledge of r2 range in subsequent r1 operations
> > >
> > > So remember constant delta between r2 and r1 and update r1 after 'if' condition.
> > >
> > > Unfortunately LLVM still uses this pattern for loops with 'can_loop' construct:
> > > for (i = 0; i < 1000 && can_loop; i++)
> > >
> > > The "undo" pass was introduced in LLVM
> > > https://reviews.llvm.org/D121937
> > > to prevent this optimization, but it cannot cover all cases.
> > > Instead of fighting the middle-end optimizer in the BPF backend, teach the
> > > verifier about this pattern.
> >
> > I like this idea.
> > In theory it could be generalized to handle situations when LLVM
> > uses two counters in parallel:
> >
> > r0 = 0 // as an index
> > r1 = 0 // as a pointer
> > ...
> > r0 += 1
> > r1 += 8
>
> I don't see how the verifier can associate r0 and r1.
> In this example r0 with be a scalar while
> r1 = ld_imm64 map
>
> One reg will be counting loops.
> Another adding fixed offset to map value.
>
> > >
> > > Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> > > ---
> >
> > [...]
> >
> > > @@ -15088,13 +15130,43 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
> > > static void find_equal_scalars(struct bpf_verifier_state *vstate,
> > > struct bpf_reg_state *known_reg)
> > > {
> > > + struct bpf_reg_state fake_reg;
> > > struct bpf_func_state *state;
> > > struct bpf_reg_state *reg;
> > >
> > > bpf_for_each_reg_in_vstate(vstate, state, reg, ({
> > > - if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
> > > + if (reg->type != SCALAR_VALUE || reg == known_reg)
> > > + continue;
> > > + if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST))
> > > + continue;
> > > + if ((reg->id & BPF_ADD_CONST) == (known_reg->id & BPF_ADD_CONST)) {
> > > copy_register_state(reg, known_reg);
> > > + } else if ((reg->id & BPF_ADD_CONST) && reg->off) {
> > > + /* reg = known_reg; reg += const */
> > > + copy_register_state(reg, known_reg);
> > > +
> > > + fake_reg.type = SCALAR_VALUE;
> > > + __mark_reg_known(&fake_reg, reg->off);
> > > + scalar32_min_max_add(reg, &fake_reg);
> > > + scalar_min_max_add(reg, &fake_reg);
> > > + reg->var_off = tnum_add(reg->var_off, fake_reg.var_off);
> > > + reg->off = 0;
> > > + reg->id &= ~BPF_ADD_CONST;
> > > + } else if ((known_reg->id & BPF_ADD_CONST) && known_reg->off) {
> > > + /* reg = known_reg; reg -= const' */
> > > + copy_register_state(reg, known_reg);
> > > +
> > > + fake_reg.type = SCALAR_VALUE;
> > > + __mark_reg_known(&fake_reg, known_reg->off);
> > > + scalar32_min_max_sub(reg, &fake_reg);
> > > + scalar_min_max_sub(reg, &fake_reg);
> > > + reg->var_off = tnum_sub(reg->var_off, fake_reg.var_off);
> > > + }
> >
> > I think that copy_register_state logic is off here,
> > the copy overwrites reg->off before it is used to update the value.
>
> Right. Last minute refactoring got bad :(
> I had 'u32 off = reg->off' all along and then "refactored".
Realized that reg->off != known_reg->off where both have add_const bit
set is actually possible.
This case is mishandled in the above. Will fix it too.
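A sketch of that case, as I read it (hypothetical):

	r1 = r0        ; r1.id == r0.id == N
	r2 = r0        ; r2.id == N
	r1 += 1        ; r1.id == N | BPF_ADD_CONST, r1.off == 1
	r2 += 2        ; r2.id == N | BPF_ADD_CONST, r2.off == 2
	if r1 < 10 ... ; both ids carry BPF_ADD_CONST, so the current code
	               ; copies r1's state into r2 verbatim and loses the
	               ; r2.off - r1.off delta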
pw-bot: cr
* Re: [PATCH bpf-next 2/4] bpf: Track delta between "linked" registers.
2024-06-10 20:31 ` Alexei Starovoitov
@ 2024-06-10 21:51 ` Eduard Zingerman
0 siblings, 0 replies; 12+ messages in thread
From: Eduard Zingerman @ 2024-06-10 21:51 UTC (permalink / raw)
To: Alexei Starovoitov
Cc: bpf, Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau,
Kumar Kartikeya Dwivedi, Kernel Team
On Mon, 2024-06-10 at 13:31 -0700, Alexei Starovoitov wrote:
[...]
> I missed mark_precise_scalar_ids() that needs to match
> what find_equal_scalars() is doing.
>
> What's broken in there?
Sorry, missed this question, here is the link:
https://lore.kernel.org/bpf/20240222005005.31784-3-eddyz87@gmail.com/
TLDR: whole function is wrong (only handles a subset of possible situations).