* [PATCH bpf-next v1] bpf, x86: Add support for signed arena loads
From: Kumar Kartikeya Dwivedi @ 2025-05-09 19:49 UTC
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann,
Martin KaFai Lau, Eduard Zingerman, kkd, kernel-team
Currently, sign-extending loads from arena memory are unsupported. The
compiler is free to generate these, and with GCC-14 we see the
corresponding verifier error when it happens. The hurdle in supporting
them is deciding which unused opcode to use to mark them for the JIT's
own consumption. After much thinking, it appears 0xc0 / BPF_NOSPEC can
be combined with load instructions to identify signed arena loads:
BPF_NOSPEC is only ever paired with the BPF_ST class, so reusing 0xc0
as the mode of a BPF_LDX instruction is unambiguous. Use this to
recognize and JIT them appropriately, and lift the verifier-side
limitation on the program when the JIT supports them.
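For illustration, a program along these lines is enough to make a
compiler targeting a recent BPF ISA (e.g. -mcpu=v4) emit such a
sign-extending load through an arena pointer. This is a minimal sketch
modeled on the arena selftests; the map layout, section names, program
name and kfunc declaration below are assumptions, not part of this
patch:

/* Illustrative sketch only: the __arena address-space macro and the
 * bpf_arena_alloc_pages() kfunc declaration are assumed to come from
 * the arena selftests headers.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_arena_common.h"

struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 1);		/* number of arena pages */
} arena SEC(".maps");

SEC("syscall")
int widen_signed(void *ctx)
{
	int __arena *p;

	p = bpf_arena_alloc_pages(&arena, NULL, 1, -1 /* NUMA_NO_NODE */, 0);
	if (!p)
		return 0;
	*p = -1;
	/* Widening the signed 32-bit arena value to 64 bits is where the
	 * compiler may emit a BPF_MEMSX (ldsx) load through the arena
	 * pointer, which the verifier used to reject for PTR_TO_ARENA.
	 */
	return (long)*p < 0;
}

char _license[] SEC("license") = "GPL";

Loading such an object previously failed with the "sign extending loads
from arena are not supported yet" verifier error removed below.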
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
arch/x86/net/bpf_jit_comp.c | 40 ++++++++++++++++++++++++++++++++++++-
include/linux/filter.h | 4 ++++
kernel/bpf/core.c | 5 +++++
kernel/bpf/verifier.c | 10 +++++++---
4 files changed, 55 insertions(+), 4 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 9e5fe2ba858f..1f1ed0cb7416 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1146,11 +1146,38 @@ static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 i
 	*pprog = prog;
 }
 
+static void emit_ldsx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
+{
+	u8 *prog = *pprog;
+
+	switch (size) {
+	case BPF_B:
+		/* movsx rax, byte ptr [rax + r12 + off] */
+		EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xBE);
+		break;
+	case BPF_H:
+		/* movsx rax, word ptr [rax + r12 + off] */
+		EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xBF);
+		break;
+	case BPF_W:
+		/* movsx rax, dword ptr [rax + r12 + off] */
+		EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x63);
+		break;
+	}
+	emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
+	*pprog = prog;
+}
+
static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
}
+static void emit_ldsx_r12(u8 **prog, u32 size, u32 dst_reg, u32 src_reg, int off)
+{
+ emit_ldsx_index(prog, size, dst_reg, src_reg, X86_REG_R12, off);
+}
+
/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
@@ -2010,13 +2037,19 @@ st: if (is_imm8(insn->off))
 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
+		case BPF_LDX | BPF_PROBE_MEM32SX | BPF_B:
+		case BPF_LDX | BPF_PROBE_MEM32SX | BPF_H:
+		case BPF_LDX | BPF_PROBE_MEM32SX | BPF_W:
 		case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
 		case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
 		case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
 		case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
 			start_of_ldx = prog;
 			if (BPF_CLASS(insn->code) == BPF_LDX)
-				emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
+				if (BPF_MODE(insn->code) == BPF_PROBE_MEM32SX)
+					emit_ldsx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
+				else
+					emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
 			else
 				emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
 populate_extable:
@@ -3875,3 +3908,8 @@ bool bpf_jit_supports_timed_may_goto(void)
{
return true;
}
+
+bool bpf_jit_supports_signed_arena_load(void)
+{
+ return true;
+}
diff --git a/include/linux/filter.h b/include/linux/filter.h
index f5cf4d35d83e..af7e13037850 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -78,6 +78,9 @@ struct ctl_table_header;
/* unused opcode to mark special atomic instruction */
#define BPF_PROBE_ATOMIC 0xe0
+/* unused opcode to mark special ldsx instruction. Same as BPF_NOSPEC */
+#define BPF_PROBE_MEM32SX 0xc0
+
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
@@ -1138,6 +1141,7 @@ bool bpf_jit_supports_arena(void);
bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
bool bpf_jit_supports_private_stack(void);
bool bpf_jit_supports_timed_may_goto(void);
+bool bpf_jit_supports_signed_arena_load(void);
u64 bpf_arch_uaddress_limit(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
u64 arch_bpf_timed_may_goto(void);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index a3e571688421..2a0431a8741c 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -3076,6 +3076,11 @@ bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
return false;
}
+bool __weak bpf_jit_supports_signed_arena_load(void)
+{
+ return false;
+}
+
u64 __weak bpf_arch_uaddress_limit(void)
{
#if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 28f5a7899bd6..792de030e1ad 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20957,10 +20957,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			continue;
 		case PTR_TO_ARENA:
 			if (BPF_MODE(insn->code) == BPF_MEMSX) {
-				verbose(env, "sign extending loads from arena are not supported yet\n");
-				return -EOPNOTSUPP;
+				if (!bpf_jit_supports_signed_arena_load()) {
+					verbose(env, "sign extending loads from arena are not supported yet\n");
+					return -EOPNOTSUPP;
+				}
+				insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32SX | BPF_SIZE(insn->code);
+			} else {
+				insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code);
 			}
-			insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code);
 			env->prog->aux->num_exentries++;
 			continue;
 		default:
--
2.47.1
* Re: [PATCH bpf-next v1] bpf, x86: Add support for signed arena loads
From: Alexei Starovoitov @ 2025-05-14 0:48 UTC
To: Kumar Kartikeya Dwivedi
Cc: bpf, Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann,
Martin KaFai Lau, Eduard Zingerman, kkd, Kernel Team
On Fri, May 9, 2025 at 12:50 PM Kumar Kartikeya Dwivedi
<memxor@gmail.com> wrote:
>
>
> +/* unused opcode to mark special ldsx instruction. Same as BPF_NOSPEC */
> +#define BPF_PROBE_MEM32SX 0xc0
lgtm. should work.
> +
> /* unused opcode to mark call to interpreter with arguments */
> #define BPF_CALL_ARGS 0xe0
>
> @@ -1138,6 +1141,7 @@ bool bpf_jit_supports_arena(void);
> bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
> bool bpf_jit_supports_private_stack(void);
> bool bpf_jit_supports_timed_may_goto(void);
> +bool bpf_jit_supports_signed_arena_load(void);
> u64 bpf_arch_uaddress_limit(void);
> void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
> u64 arch_bpf_timed_may_goto(void);
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index a3e571688421..2a0431a8741c 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -3076,6 +3076,11 @@ bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
> return false;
> }
>
> +bool __weak bpf_jit_supports_signed_arena_load(void)
> +{
> + return false;
> +}
Instead of introducing a new weak function, let's use bpf_jit_supports_insn()?
We were planning to convert other weak functions to it,
but the work wasn't done.
At least let's not create more tech debt to clean up later.
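Just to illustrate the suggestion, a rough verifier-side sketch
(untested; the probe-insn construction and the use of the in_arena flag
are assumptions, not code from this thread) might look like:

		case PTR_TO_ARENA:
			if (BPF_MODE(insn->code) == BPF_MEMSX) {
				struct bpf_insn probe = *insn;

				/* Ask the JIT whether it handles the rewritten
				 * arena ldsx insn, instead of adding another
				 * weak helper.
				 */
				probe.code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32SX | BPF_SIZE(insn->code);
				if (!bpf_jit_supports_insn(&probe, true)) {
					verbose(env, "sign extending loads from arena are not supported yet\n");
					return -EOPNOTSUPP;
				}
				insn->code = probe.code;
			} else {
				insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code);
			}
			env->prog->aux->num_exentries++;
			continue;

Each JIT's bpf_jit_supports_insn() would then have to report the new
BPF_PROBE_MEM32SX code accurately, but the arch opts in through the
existing per-insn hook rather than another weak function.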