BPF List
 help / color / mirror / Atom feed
* [PATCH bpf-next] bpf: arm64: Fix panic due to missing BTI at indirect jump targets
@ 2025-11-27 14:03 Xu Kuohai
  2025-11-27 16:11 ` Anton Protopopov
  0 siblings, 1 reply; 6+ messages in thread
From: Xu Kuohai @ 2025-11-27 14:03 UTC (permalink / raw)
  To: bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Yonghong Song, Puranjay Mohan,
	Catalin Marinas, Will Deacon

From: Xu Kuohai <xukuohai@huawei.com>

When BTI is enabled, the indirect jump selftest triggers BTI exception:

Internal error: Oops - BTI: 0000000036000003 [#1]  SMP
...
Call trace:
 bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
 bpf_prog_run_pin_on_cpu+0x140/0x468
 bpf_prog_test_run_syscall+0x280/0x3b8
 bpf_prog_test_run+0x22c/0x2c0
 __sys_bpf+0x4d8/0x5c8
 __arm64_sys_bpf+0x88/0xa8
 invoke_syscall+0x80/0x220
 el0_svc_common+0x160/0x1d0
 do_el0_svc+0x54/0x70
 el0_svc+0x54/0x188
 el0t_64_sync_handler+0x84/0x130
 el0t_64_sync+0x198/0x1a0

This happens because no BTI instruction is generated by the JIT for
indirect jump targets.

Fix it by emitting BTI instruction for every possible indirect jump
targets when BTI is enabled. The targets are identified by traversing
all instruction arrays used by the BPF program, since indirect jump
targets can only be read from instruction arrays.

Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
---
 arch/arm64/net/bpf_jit_comp.c | 20 ++++++++++++++++
 include/linux/bpf.h           | 12 ++++++++++
 kernel/bpf/bpf_insn_array.c   | 43 +++++++++++++++++++++++++++++++++++
 3 files changed, 75 insertions(+)

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 929123a5431a..f546df886049 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -78,6 +78,7 @@ static const int bpf2a64[] = {
 
 struct jit_ctx {
 	const struct bpf_prog *prog;
+	unsigned long *indirect_targets;
 	int idx;
 	int epilogue_offset;
 	int *offset;
@@ -1199,6 +1200,11 @@ static int add_exception_handler(const struct bpf_insn *insn,
 	return 0;
 }
 
+static bool maybe_indirect_target(int insn_off, unsigned long *targets_bitmap)
+{
+	return targets_bitmap && test_bit(insn_off, targets_bitmap);
+}
+
 /* JITs an eBPF instruction.
  * Returns:
  * 0  - successfully JITed an 8-byte eBPF instruction.
@@ -1231,6 +1237,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	int ret;
 	bool sign_extend;
 
+	if (maybe_indirect_target(i, ctx->indirect_targets))
+		emit_bti(A64_BTI_J, ctx);
+
 	switch (code) {
 	/* dst = src */
 	case BPF_ALU | BPF_MOV | BPF_X:
@@ -2085,6 +2094,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
+	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && bpf_prog_has_insn_array(prog)) {
+		ctx.indirect_targets = kvcalloc(BITS_TO_LONGS(prog->len), sizeof(unsigned long),
+						GFP_KERNEL);
+		if (ctx.indirect_targets == NULL) {
+			prog = orig_prog;
+			goto out_off;
+		}
+		bpf_prog_collect_indirect_targets(prog, ctx.indirect_targets);
+	}
+
 	ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
 	if (ctx.offset == NULL) {
 		prog = orig_prog;
@@ -2248,6 +2267,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 			prog->aux->priv_stack_ptr = NULL;
 		}
 		kvfree(ctx.offset);
+		kvfree(ctx.indirect_targets);
 out_priv_stack:
 		kfree(jit_data);
 		prog->aux->jit_data = NULL;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a9b788c7b4aa..c81eb54f7b26 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3822,11 +3822,23 @@ void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
 
 #ifdef CONFIG_BPF_SYSCALL
 void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image);
+void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap);
+bool bpf_prog_has_insn_array(const struct bpf_prog *prog);
 #else
 static inline void
 bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
 {
 }
+
+static inline bool bpf_prog_has_insn_array(const struct bpf_prog *prog)
+{
+	return false;
+}
+
+static inline void
+bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
+{
+}
 #endif
 
 #endif /* _LINUX_BPF_H */
diff --git a/kernel/bpf/bpf_insn_array.c b/kernel/bpf/bpf_insn_array.c
index 61ce52882632..ed20b186a1f5 100644
--- a/kernel/bpf/bpf_insn_array.c
+++ b/kernel/bpf/bpf_insn_array.c
@@ -299,3 +299,46 @@ void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
 		}
 	}
 }
+
+bool bpf_prog_has_insn_array(const struct bpf_prog *prog)
+{
+	int i;
+
+	for (i = 0; i < prog->aux->used_map_cnt; i++) {
+		if (is_insn_array(prog->aux->used_maps[i]))
+			return true;
+	}
+	return false;
+}
+
+/*
+ * This function collects possible indirect jump targets in a BPF program. Since indirect jump
+ * targets can only be read from instruction arrays, it traverses all instruction arrays used
+ * by @prog. For each instruction in the arrays, it sets the corresponding bit in @bitmap.
+ */
+void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
+{
+	struct bpf_insn_array *insn_array;
+	struct bpf_map *map;
+	u32 xlated_off;
+	int i, j;
+
+	for (i = 0; i < prog->aux->used_map_cnt; i++) {
+		map = prog->aux->used_maps[i];
+		if (!is_insn_array(map))
+			continue;
+
+		insn_array = cast_insn_array(map);
+		for (j = 0; j < map->max_entries; j++) {
+			xlated_off = insn_array->values[j].xlated_off;
+			if (xlated_off == INSN_DELETED)
+				continue;
+			if (xlated_off < prog->aux->subprog_start)
+				continue;
+			xlated_off -= prog->aux->subprog_start;
+			if (xlated_off >= prog->len)
+				continue;
+			__set_bit(xlated_off, bitmap);
+		}
+	}
+}
-- 
2.47.3


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH bpf-next] bpf: arm64: Fix panic due to missing BTI at indirect jump targets
  2025-11-27 14:03 [PATCH bpf-next] bpf: arm64: Fix panic due to missing BTI at indirect jump targets Xu Kuohai
@ 2025-11-27 16:11 ` Anton Protopopov
  2025-11-27 16:46   ` Anton Protopopov
  2025-11-28  7:16   ` Xu Kuohai
  0 siblings, 2 replies; 6+ messages in thread
From: Anton Protopopov @ 2025-11-27 16:11 UTC (permalink / raw)
  To: Xu Kuohai
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Yonghong Song, Puranjay Mohan,
	Catalin Marinas, Will Deacon

On 25/11/27 10:03PM, Xu Kuohai wrote:
> From: Xu Kuohai <xukuohai@huawei.com>

This patch doesn't apply to bpf-next due to
https://lore.kernel.org/bpf/20251125145857.98134-2-leon.hwang@linux.dev/T/#u

> When BTI is enabled, the indirect jump selftest triggers BTI exception:
> 
> Internal error: Oops - BTI: 0000000036000003 [#1]  SMP
> ...
> Call trace:
>  bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
>  bpf_prog_run_pin_on_cpu+0x140/0x468
>  bpf_prog_test_run_syscall+0x280/0x3b8
>  bpf_prog_test_run+0x22c/0x2c0
>  __sys_bpf+0x4d8/0x5c8
>  __arm64_sys_bpf+0x88/0xa8
>  invoke_syscall+0x80/0x220
>  el0_svc_common+0x160/0x1d0
>  do_el0_svc+0x54/0x70
>  el0_svc+0x54/0x188
>  el0t_64_sync_handler+0x84/0x130
>  el0t_64_sync+0x198/0x1a0
> 
> This happens because no BTI instruction is generated by the JIT for
> indirect jump targets.
> 
> Fix it by emitting BTI instruction for every possible indirect jump
> targets when BTI is enabled. The targets are identified by traversing
> all instruction arrays used by the BPF program, since indirect jump
> targets can only be read from instruction arrays.
> 
> Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
> Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
> ---
>  arch/arm64/net/bpf_jit_comp.c | 20 ++++++++++++++++
>  include/linux/bpf.h           | 12 ++++++++++
>  kernel/bpf/bpf_insn_array.c   | 43 +++++++++++++++++++++++++++++++++++
>  3 files changed, 75 insertions(+)
> 
> diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
> index 929123a5431a..f546df886049 100644
> --- a/arch/arm64/net/bpf_jit_comp.c
> +++ b/arch/arm64/net/bpf_jit_comp.c
> @@ -78,6 +78,7 @@ static const int bpf2a64[] = {
>  
>  struct jit_ctx {
>  	const struct bpf_prog *prog;
> +	unsigned long *indirect_targets;
>  	int idx;
>  	int epilogue_offset;
>  	int *offset;
> @@ -1199,6 +1200,11 @@ static int add_exception_handler(const struct bpf_insn *insn,
>  	return 0;
>  }
>  
> +static bool maybe_indirect_target(int insn_off, unsigned long *targets_bitmap)

Why "maybe"? (But also see below.)

> +{
> +	return targets_bitmap && test_bit(insn_off, targets_bitmap);
> +}
> +
>  /* JITs an eBPF instruction.
>   * Returns:
>   * 0  - successfully JITed an 8-byte eBPF instruction.
> @@ -1231,6 +1237,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
>  	int ret;
>  	bool sign_extend;
>  
> +	if (maybe_indirect_target(i, ctx->indirect_targets))
> +		emit_bti(A64_BTI_J, ctx);
> +
>  	switch (code) {
>  	/* dst = src */
>  	case BPF_ALU | BPF_MOV | BPF_X:
> @@ -2085,6 +2094,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
>  	memset(&ctx, 0, sizeof(ctx));
>  	ctx.prog = prog;
>  
> +	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && bpf_prog_has_insn_array(prog)) {
> +		ctx.indirect_targets = kvcalloc(BITS_TO_LONGS(prog->len), sizeof(unsigned long),
> +						GFP_KERNEL);

It's allocated here on every run, but freed below only when 
!prog->is_func || extra_pass.

> +		if (ctx.indirect_targets == NULL) {
> +			prog = orig_prog;
> +			goto out_off;
> +		}
> +		bpf_prog_collect_indirect_targets(prog, ctx.indirect_targets);
> +	}
> +
>  	ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
>  	if (ctx.offset == NULL) {
>  		prog = orig_prog;
> @@ -2248,6 +2267,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
>  			prog->aux->priv_stack_ptr = NULL;
>  		}
>  		kvfree(ctx.offset);
> +		kvfree(ctx.indirect_targets);
>  out_priv_stack:
>  		kfree(jit_data);
>  		prog->aux->jit_data = NULL;
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index a9b788c7b4aa..c81eb54f7b26 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -3822,11 +3822,23 @@ void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
>  
>  #ifdef CONFIG_BPF_SYSCALL
>  void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image);
> +void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap);
> +bool bpf_prog_has_insn_array(const struct bpf_prog *prog);
>  #else
>  static inline void
>  bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
>  {
>  }
> +
> +static inline bool bpf_prog_has_insn_array(const struct bpf_prog *prog)
> +{
> +	return false;
> +}
> +
> +static inline void
> +bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
> +{
> +}
>  #endif
>  
>  #endif /* _LINUX_BPF_H */
> diff --git a/kernel/bpf/bpf_insn_array.c b/kernel/bpf/bpf_insn_array.c
> index 61ce52882632..ed20b186a1f5 100644
> --- a/kernel/bpf/bpf_insn_array.c
> +++ b/kernel/bpf/bpf_insn_array.c
> @@ -299,3 +299,46 @@ void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
>  		}
>  	}
>  }
> +
> +bool bpf_prog_has_insn_array(const struct bpf_prog *prog)
> +{
> +	int i;
> +
> +	for (i = 0; i < prog->aux->used_map_cnt; i++) {
> +		if (is_insn_array(prog->aux->used_maps[i]))
> +			return true;
> +	}
> +	return false;
> +}

I think a different check is needed here (and a different function
name, smth like "bpf_prog_has_indirect_jumps"), and a different
algorithm to collect jump targets in the chunk below. A program can
have instruction arrays not related to indirect jumps (see, e.g.,
bpf_insn_array selftests + in future insns arrays will be used to
also support other functionality). As an extreme case, an insn array
can point to every instruction in a prog, thus a BTI will be
generated for every instruction.

In verifier it is used a bit differently, namely, all insn arrays for
a given subprog are collected when an indirect jump is encountered
(and non-deterministic only in check_cfg). Later in verification, an
exact map is used, so this is not a problem.

Initially I wanted to have a map flag (in map_extra) to distinguish between
different types of instruction arrays ("plain ones", "jump targets",
"call targets", "static keys"), but Andrii wasn't happy with it,
so eventually I've dropped it. Maybe it is worth adding it until
the code is merged to upstream? Eduard, Alexei, wdyt?

> +
> +/*
> + * This function collects possible indirect jump targets in a BPF program. Since indirect jump
> + * targets can only be read from instruction arrays, it traverses all instruction arrays used
> + * by @prog. For each instruction in the arrays, it sets the corresponding bit in @bitmap.
> + */
> +void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
> +{
> +	struct bpf_insn_array *insn_array;
> +	struct bpf_map *map;
> +	u32 xlated_off;
> +	int i, j;
> +
> +	for (i = 0; i < prog->aux->used_map_cnt; i++) {
> +		map = prog->aux->used_maps[i];
> +		if (!is_insn_array(map))
> +			continue;
> +
> +		insn_array = cast_insn_array(map);
> +		for (j = 0; j < map->max_entries; j++) {
> +			xlated_off = insn_array->values[j].xlated_off;
> +			if (xlated_off == INSN_DELETED)
> +				continue;
> +			if (xlated_off < prog->aux->subprog_start)
> +				continue;
> +			xlated_off -= prog->aux->subprog_start;
> +			if (xlated_off >= prog->len)
> +				continue;
> +			__set_bit(xlated_off, bitmap);
> +		}
> +	}
> +}
> -- 
> 2.47.3
> 

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH bpf-next] bpf: arm64: Fix panic due to missing BTI at indirect jump targets
  2025-11-27 16:11 ` Anton Protopopov
@ 2025-11-27 16:46   ` Anton Protopopov
  2025-12-04  6:47     ` Xu Kuohai
  2025-11-28  7:16   ` Xu Kuohai
  1 sibling, 1 reply; 6+ messages in thread
From: Anton Protopopov @ 2025-11-27 16:46 UTC (permalink / raw)
  To: Xu Kuohai
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Yonghong Song, Puranjay Mohan,
	Catalin Marinas, Will Deacon

On 25/11/27 04:11PM, Anton Protopopov wrote:
> On 25/11/27 10:03PM, Xu Kuohai wrote:
> > From: Xu Kuohai <xukuohai@huawei.com>
> 
> This patch doesn't apply to bpf-next due to
> https://lore.kernel.org/bpf/20251125145857.98134-2-leon.hwang@linux.dev/T/#u
> 
> > When BTI is enabled, the indirect jump selftest triggers BTI exception:
> > 
> > Internal error: Oops - BTI: 0000000036000003 [#1]  SMP
> > ...
> > Call trace:
> >  bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
> >  bpf_prog_run_pin_on_cpu+0x140/0x468
> >  bpf_prog_test_run_syscall+0x280/0x3b8
> >  bpf_prog_test_run+0x22c/0x2c0
> >  __sys_bpf+0x4d8/0x5c8
> >  __arm64_sys_bpf+0x88/0xa8
> >  invoke_syscall+0x80/0x220
> >  el0_svc_common+0x160/0x1d0
> >  do_el0_svc+0x54/0x70
> >  el0_svc+0x54/0x188
> >  el0t_64_sync_handler+0x84/0x130
> >  el0t_64_sync+0x198/0x1a0
> > 
> > This happens because no BTI instruction is generated by the JIT for
> > indirect jump targets.
> > 
> > Fix it by emitting BTI instruction for every possible indirect jump
> > targets when BTI is enabled. The targets are identified by traversing
> > all instruction arrays used by the BPF program, since indirect jump
> > targets can only be read from instruction arrays.
> > 
> > Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
> > Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
> > ---
> >  arch/arm64/net/bpf_jit_comp.c | 20 ++++++++++++++++
> >  include/linux/bpf.h           | 12 ++++++++++
> >  kernel/bpf/bpf_insn_array.c   | 43 +++++++++++++++++++++++++++++++++++
> >  3 files changed, 75 insertions(+)
> > 
> > diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
> > index 929123a5431a..f546df886049 100644
> > --- a/arch/arm64/net/bpf_jit_comp.c
> > +++ b/arch/arm64/net/bpf_jit_comp.c
> > @@ -78,6 +78,7 @@ static const int bpf2a64[] = {
> >  
> >  struct jit_ctx {
> >  	const struct bpf_prog *prog;
> > +	unsigned long *indirect_targets;
> >  	int idx;
> >  	int epilogue_offset;
> >  	int *offset;
> > @@ -1199,6 +1200,11 @@ static int add_exception_handler(const struct bpf_insn *insn,
> >  	return 0;
> >  }
> >  
> > +static bool maybe_indirect_target(int insn_off, unsigned long *targets_bitmap)
> 
> Why "maybe"? (But also see below.)
> 
> > +{
> > +	return targets_bitmap && test_bit(insn_off, targets_bitmap);
> > +}
> > +
> >  /* JITs an eBPF instruction.
> >   * Returns:
> >   * 0  - successfully JITed an 8-byte eBPF instruction.
> > @@ -1231,6 +1237,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
> >  	int ret;
> >  	bool sign_extend;
> >  
> > +	if (maybe_indirect_target(i, ctx->indirect_targets))
> > +		emit_bti(A64_BTI_J, ctx);
> > +
> >  	switch (code) {
> >  	/* dst = src */
> >  	case BPF_ALU | BPF_MOV | BPF_X:
> > @@ -2085,6 +2094,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
> >  	memset(&ctx, 0, sizeof(ctx));
> >  	ctx.prog = prog;
> >  
> > +	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && bpf_prog_has_insn_array(prog)) {
> > +		ctx.indirect_targets = kvcalloc(BITS_TO_LONGS(prog->len), sizeof(unsigned long),
> > +						GFP_KERNEL);
> 
> It's allocated here on every run, but freed below only when 
> !prog->is_func || extra_pass.
> 
> > +		if (ctx.indirect_targets == NULL) {
> > +			prog = orig_prog;
> > +			goto out_off;
> > +		}
> > +		bpf_prog_collect_indirect_targets(prog, ctx.indirect_targets);
> > +	}
> > +
> >  	ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
> >  	if (ctx.offset == NULL) {
> >  		prog = orig_prog;
> > @@ -2248,6 +2267,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
> >  			prog->aux->priv_stack_ptr = NULL;
> >  		}
> >  		kvfree(ctx.offset);
> > +		kvfree(ctx.indirect_targets);
> >  out_priv_stack:
> >  		kfree(jit_data);
> >  		prog->aux->jit_data = NULL;
> > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > index a9b788c7b4aa..c81eb54f7b26 100644
> > --- a/include/linux/bpf.h
> > +++ b/include/linux/bpf.h
> > @@ -3822,11 +3822,23 @@ void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
> >  
> >  #ifdef CONFIG_BPF_SYSCALL
> >  void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image);
> > +void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap);
> > +bool bpf_prog_has_insn_array(const struct bpf_prog *prog);
> >  #else
> >  static inline void
> >  bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
> >  {
> >  }
> > +
> > +static inline bool bpf_prog_has_insn_array(const struct bpf_prog *prog)
> > +{
> > +	return false;
> > +}
> > +
> > +static inline void
> > +bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
> > +{
> > +}
> >  #endif
> >  
> >  #endif /* _LINUX_BPF_H */
> > diff --git a/kernel/bpf/bpf_insn_array.c b/kernel/bpf/bpf_insn_array.c
> > index 61ce52882632..ed20b186a1f5 100644
> > --- a/kernel/bpf/bpf_insn_array.c
> > +++ b/kernel/bpf/bpf_insn_array.c
> > @@ -299,3 +299,46 @@ void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
> >  		}
> >  	}
> >  }
> > +
> > +bool bpf_prog_has_insn_array(const struct bpf_prog *prog)
> > +{
> > +	int i;
> > +
> > +	for (i = 0; i < prog->aux->used_map_cnt; i++) {
> > +		if (is_insn_array(prog->aux->used_maps[i]))
> > +			return true;
> > +	}
> > +	return false;
> > +}
> 
> I think a different check is needed here (and a different function
> name, smth like "bpf_prog_has_indirect_jumps"), and a different
> algorithm to collect jump targets in the chunk below. A program can
> have instruction arrays not related to indirect jumps (see, e.g.,
> bpf_insn_array selftests + in future insns arrays will be used to
> also support other functionality). As an extreme case, an insn array
> can point to every instruction in a prog, thus a BTI will be
> generated for every instruction.
> 
> In verifier it is used a bit differently, namely, all insn arrays for
> a given subprog are collected when an indirect jump is encountered
> (and non-deterministic only in check_cfg). Later in verification, an
> exact map is used, so this is not a problem.
> 
> Initially I wanted to have a map flag (in map_extra) to distinguish between
> different types of instruction arrays ("plain ones", "jump targets",
> "call targets", "static keys"), but Andrii wasn't happy with it,
> so eventually I've dropped it. Maybe it is worth adding it until
> the code is merged to upstream? Eduard, Alexei, wdyt?

Actually, it is even better to mark a map as containing indirect
jump targets in check_indirect_jump(). This will be the most precise
set of targets, and won't require any userspace-visible changes/flags.

Something like this:

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 6498be4c44f8..c2d708213330 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -292,6 +292,10 @@ struct bpf_map_owner {
 	enum bpf_attach_type expected_attach_type;
 };
 
+/* map_subtype values for map_type BPF_MAP_TYPE_INSN_ARRAY */
+#define BPF_INSN_ARRAY_VOID		0
+#define BPF_INSN_ARRAY_JUMP_TABLE	1
+
 struct bpf_map {
 	u8 sha[SHA256_DIGEST_SIZE];
 	const struct bpf_map_ops *ops;
@@ -331,6 +335,7 @@ struct bpf_map {
 	bool frozen; /* write-once; write-protected by freeze_mutex */
 	bool free_after_mult_rcu_gp;
 	bool free_after_rcu_gp;
+	u32 map_subtype; /* defined per map type */
 	atomic64_t sleepable_refcnt;
 	s64 __percpu *elem_count;
 	u64 cookie; /* write-once */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 766695491bc5..60bbd32e793a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20293,6 +20293,12 @@ static int check_indirect_jump(struct bpf_verifier_env *env, struct bpf_insn *in
 		return -EINVAL;
 	}
 
+	/*
+	 * Explicitly mark this map as a jump table such that it can be
+	 * distinguished later from other instruction arrays
+	 */
+	map->map_subtype = BPF_INSN_ARRAY_JUMP_TABLE;
+
 	for (i = 0; i < n - 1; i++) {
 		other_branch = push_stack(env, env->gotox_tmp_buf->items[i],
 					  env->insn_idx, env->cur_state->speculative);

> > +
> > +/*
> > + * This function collects possible indirect jump targets in a BPF program. Since indirect jump
> > + * targets can only be read from instruction arrays, it traverses all instruction arrays used
> > + * by @prog. For each instruction in the arrays, it sets the corresponding bit in @bitmap.
> > + */
> > +void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
> > +{
> > +	struct bpf_insn_array *insn_array;
> > +	struct bpf_map *map;
> > +	u32 xlated_off;
> > +	int i, j;
> > +
> > +	for (i = 0; i < prog->aux->used_map_cnt; i++) {
> > +		map = prog->aux->used_maps[i];
> > +		if (!is_insn_array(map))
> > +			continue;
> > +
> > +		insn_array = cast_insn_array(map);
> > +		for (j = 0; j < map->max_entries; j++) {
> > +			xlated_off = insn_array->values[j].xlated_off;
> > +			if (xlated_off == INSN_DELETED)
> > +				continue;
> > +			if (xlated_off < prog->aux->subprog_start)
> > +				continue;
> > +			xlated_off -= prog->aux->subprog_start;
> > +			if (xlated_off >= prog->len)
> > +				continue;
> > +			__set_bit(xlated_off, bitmap);
> > +		}
> > +	}
> > +}
> > -- 
> > 2.47.3
> > 

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH bpf-next] bpf: arm64: Fix panic due to missing BTI at indirect jump targets
  2025-11-27 16:11 ` Anton Protopopov
  2025-11-27 16:46   ` Anton Protopopov
@ 2025-11-28  7:16   ` Xu Kuohai
  1 sibling, 0 replies; 6+ messages in thread
From: Xu Kuohai @ 2025-11-28  7:16 UTC (permalink / raw)
  To: Anton Protopopov
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Yonghong Song, Puranjay Mohan,
	Catalin Marinas, Will Deacon

On 11/28/2025 12:11 AM, Anton Protopopov wrote:
> On 25/11/27 10:03PM, Xu Kuohai wrote:
>> From: Xu Kuohai <xukuohai@huawei.com>
> 
> This patch doesn't apply to bpf-next due to
> https://lore.kernel.org/bpf/20251125145857.98134-2-leon.hwang@linux.dev/T/#u
>

Oops, thanks for the notice. I'll rebase on the latest bpf-next.

>> When BTI is enabled, the indirect jump selftest triggers BTI exception:
>>
>> Internal error: Oops - BTI: 0000000036000003 [#1]  SMP
>> ...
>> Call trace:
>>   bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
>>   bpf_prog_run_pin_on_cpu+0x140/0x468
>>   bpf_prog_test_run_syscall+0x280/0x3b8
>>   bpf_prog_test_run+0x22c/0x2c0
>>   __sys_bpf+0x4d8/0x5c8
>>   __arm64_sys_bpf+0x88/0xa8
>>   invoke_syscall+0x80/0x220
>>   el0_svc_common+0x160/0x1d0
>>   do_el0_svc+0x54/0x70
>>   el0_svc+0x54/0x188
>>   el0t_64_sync_handler+0x84/0x130
>>   el0t_64_sync+0x198/0x1a0
>>
>> This happens because no BTI instruction is generated by the JIT for
>> indirect jump targets.
>>
>> Fix it by emitting BTI instruction for every possible indirect jump
>> targets when BTI is enabled. The targets are identified by traversing
>> all instruction arrays used by the BPF program, since indirect jump
>> targets can only be read from instruction arrays.
>>
>> Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
>> Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
>> ---
>>   arch/arm64/net/bpf_jit_comp.c | 20 ++++++++++++++++
>>   include/linux/bpf.h           | 12 ++++++++++
>>   kernel/bpf/bpf_insn_array.c   | 43 +++++++++++++++++++++++++++++++++++
>>   3 files changed, 75 insertions(+)
>>
>> diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
>> index 929123a5431a..f546df886049 100644
>> --- a/arch/arm64/net/bpf_jit_comp.c
>> +++ b/arch/arm64/net/bpf_jit_comp.c
>> @@ -78,6 +78,7 @@ static const int bpf2a64[] = {
>>   
>>   struct jit_ctx {
>>   	const struct bpf_prog *prog;
>> +	unsigned long *indirect_targets;
>>   	int idx;
>>   	int epilogue_offset;
>>   	int *offset;
>> @@ -1199,6 +1200,11 @@ static int add_exception_handler(const struct bpf_insn *insn,
>>   	return 0;
>>   }
>>   
>> +static bool maybe_indirect_target(int insn_off, unsigned long *targets_bitmap)
> 
> Why "maybe"? (But also see below.)
>

IIUC, although indirect jump targets are loaded from instruction arrays,
not all instructions pointed to by an instruction array are actually used
as indirect jump targets. It is legal for indirect jumps to target only
some of the instructions.

>> +{
>> +	return targets_bitmap && test_bit(insn_off, targets_bitmap);
>> +}
>> +
>>   /* JITs an eBPF instruction.
>>    * Returns:
>>    * 0  - successfully JITed an 8-byte eBPF instruction.
>> @@ -1231,6 +1237,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
>>   	int ret;
>>   	bool sign_extend;
>>   
>> +	if (maybe_indirect_target(i, ctx->indirect_targets))
>> +		emit_bti(A64_BTI_J, ctx);
>> +
>>   	switch (code) {
>>   	/* dst = src */
>>   	case BPF_ALU | BPF_MOV | BPF_X:
>> @@ -2085,6 +2094,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
>>   	memset(&ctx, 0, sizeof(ctx));
>>   	ctx.prog = prog;
>>   
>> +	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && bpf_prog_has_insn_array(prog)) {
>> +		ctx.indirect_targets = kvcalloc(BITS_TO_LONGS(prog->len), sizeof(unsigned long),
>> +						GFP_KERNEL);
> 
> It's allocated here on every run, but freed below only when
> !prog->is_func || extra_pass.
>

Well, actually, the bitmap is allocated in the first pass of the JIT
and freed in the last pass. For passes other than the first, this code
is skipped via the statement goto skip_init_ctx, which is unaffected
by this patch.

Indeed, there are quite a few gotos in bpf_int_jit_compile, which makes
the code somewhat hard to follow. It seems like it’s time for some
cleanup, and I’ll try to work on that.

>> +		if (ctx.indirect_targets == NULL) {
>> +			prog = orig_prog;
>> +			goto out_off;
>> +		}
>> +		bpf_prog_collect_indirect_targets(prog, ctx.indirect_targets);
>> +	}
>> +
>>   	ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
>>   	if (ctx.offset == NULL) {
>>   		prog = orig_prog;
>> @@ -2248,6 +2267,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
>>   			prog->aux->priv_stack_ptr = NULL;
>>   		}
>>   		kvfree(ctx.offset);
>> +		kvfree(ctx.indirect_targets);
>>   out_priv_stack:
>>   		kfree(jit_data);
>>   		prog->aux->jit_data = NULL;
>> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
>> index a9b788c7b4aa..c81eb54f7b26 100644
>> --- a/include/linux/bpf.h
>> +++ b/include/linux/bpf.h
>> @@ -3822,11 +3822,23 @@ void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
>>   
>>   #ifdef CONFIG_BPF_SYSCALL
>>   void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image);
>> +void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap);
>> +bool bpf_prog_has_insn_array(const struct bpf_prog *prog);
>>   #else
>>   static inline void
>>   bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
>>   {
>>   }
>> +
>> +static inline bool bpf_prog_has_insn_array(const struct bpf_prog *prog)
>> +{
>> +	return false;
>> +}
>> +
>> +static inline void
>> +bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
>> +{
>> +}
>>   #endif
>>   
>>   #endif /* _LINUX_BPF_H */
>> diff --git a/kernel/bpf/bpf_insn_array.c b/kernel/bpf/bpf_insn_array.c
>> index 61ce52882632..ed20b186a1f5 100644
>> --- a/kernel/bpf/bpf_insn_array.c
>> +++ b/kernel/bpf/bpf_insn_array.c
>> @@ -299,3 +299,46 @@ void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
>>   		}
>>   	}
>>   }
>> +
>> +bool bpf_prog_has_insn_array(const struct bpf_prog *prog)
>> +{
>> +	int i;
>> +
>> +	for (i = 0; i < prog->aux->used_map_cnt; i++) {
>> +		if (is_insn_array(prog->aux->used_maps[i]))
>> +			return true;
>> +	}
>> +	return false;
>> +}
> 
> I think a different check is needed here (and a different function
> name, smth like "bpf_prog_has_indirect_jumps"), and a different
> algorithm to collect jump targets in the chunk below. A program can
> have instruction arrays not related to indirect jumps (see, e.g.,
> bpf_insn_array selftests + in future insns arrays will be used to
> also support other functionality). As an extreme case, an insn array
> can point to every instruction in a prog, thus a BTI will be
> generated for every instruction.
> 
> In verifier it is used a bit differently, namely, all insn arrays for
> a given subprog are collected when an indirect jump is encountered
> (and non-deterministic only in check_cfg). Later in verification, an
> exact map is used, so this is not a problem.
>

ACK, we should leverage the information derived by the verifier to exclude
instruction arrays that are not used for indirect jumps.

> Initially I wanted to have a map flag (in map_extra) to distinguish between
> different types of instruction arrays ("plain ones", "jump targets",
> "call targets", "static keys"), but Andrii wasn't happy with it,
> so eventually I've dropped it. Maybe it is worth adding it until
> the code is merged to upstream? Eduard, Alexei, wdyt?
> 
>> +
>> +/*
>> + * This function collects possible indirect jump targets in a BPF program. Since indirect jump
>> + * targets can only be read from instruction arrays, it traverses all instruction arrays used
>> + * by @prog. For each instruction in the arrays, it sets the corresponding bit in @bitmap.
>> + */
>> +void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
>> +{
>> +	struct bpf_insn_array *insn_array;
>> +	struct bpf_map *map;
>> +	u32 xlated_off;
>> +	int i, j;
>> +
>> +	for (i = 0; i < prog->aux->used_map_cnt; i++) {
>> +		map = prog->aux->used_maps[i];
>> +		if (!is_insn_array(map))
>> +			continue;
>> +
>> +		insn_array = cast_insn_array(map);
>> +		for (j = 0; j < map->max_entries; j++) {
>> +			xlated_off = insn_array->values[j].xlated_off;
>> +			if (xlated_off == INSN_DELETED)
>> +				continue;
>> +			if (xlated_off < prog->aux->subprog_start)
>> +				continue;
>> +			xlated_off -= prog->aux->subprog_start;
>> +			if (xlated_off >= prog->len)
>> +				continue;
>> +			__set_bit(xlated_off, bitmap);
>> +		}
>> +	}
>> +}
>> -- 
>> 2.47.3
>>


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH bpf-next] bpf: arm64: Fix panic due to missing BTI at indirect jump targets
  2025-11-27 16:46   ` Anton Protopopov
@ 2025-12-04  6:47     ` Xu Kuohai
  2025-12-04  8:09       ` Anton Protopopov
  0 siblings, 1 reply; 6+ messages in thread
From: Xu Kuohai @ 2025-12-04  6:47 UTC (permalink / raw)
  To: Anton Protopopov
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Yonghong Song, Puranjay Mohan,
	Catalin Marinas, Will Deacon

On 11/28/2025 12:46 AM, Anton Protopopov wrote:

[...]

>>> diff --git a/kernel/bpf/bpf_insn_array.c b/kernel/bpf/bpf_insn_array.c
>>> index 61ce52882632..ed20b186a1f5 100644
>>> --- a/kernel/bpf/bpf_insn_array.c
>>> +++ b/kernel/bpf/bpf_insn_array.c
>>> @@ -299,3 +299,46 @@ void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
>>>   		}
>>>   	}
>>>   }
>>> +
>>> +bool bpf_prog_has_insn_array(const struct bpf_prog *prog)
>>> +{
>>> +	int i;
>>> +
>>> +	for (i = 0; i < prog->aux->used_map_cnt; i++) {
>>> +		if (is_insn_array(prog->aux->used_maps[i]))
>>> +			return true;
>>> +	}
>>> +	return false;
>>> +}
>>
>> I think a different check is needed here (and a different function
>> name, smth like "bpf_prog_has_indirect_jumps"), and a different
>> algorithm to collect jump targets in the chunk below. A program can
>> have instruction arrays not related to indirect jumps (see, e.g.,
>> bpf_insn_array selftests + in future insns arrays will be used to
>> also support other functionality). As an extreme case, an insn array
>> can point to every instruction in a prog, thus a BTI will be
>> generated for every instruction.
>>
>> In verifier it is used a bit differently, namely, all insn arrays for
>> a given subprog are collected when an indirect jump is encountered
>> (and non-deterministic only in check_cfg). Later in verification, an
>> exact map is used, so this is not a problem.
>>
>> Initially I wanted to have a map flag (in map_extra) to distinguish between
>> different types of instruction arrays ("plain ones", "jump targets",
>> "call targets", "static keys"), but Andrii wasn't happy with it,
>> so eventually I've dropped it. Maybe it is worth adding it until
>> the code is merged to upstream? Eduard, Alexei, wdyt?
> 
> Actually, this is even better to mark a map as containing indirect
> jump targets in check_indirect_jump(). This will be the most precise
> set of targets, and won't require any userspace-visible changes/flags.
> 
> Something like this:
> 
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 6498be4c44f8..c2d708213330 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -292,6 +292,10 @@ struct bpf_map_owner {
>   	enum bpf_attach_type expected_attach_type;
>   };
>   
> +/* map_subtype values for map_type BPF_MAP_TYPE_INSN_ARRAY */
> +#define BPF_INSN_ARRAY_VOID		0
> +#define BPF_INSN_ARRAY_JUMP_TABLE	1
> +
>   struct bpf_map {
>   	u8 sha[SHA256_DIGEST_SIZE];
>   	const struct bpf_map_ops *ops;
> @@ -331,6 +335,7 @@ struct bpf_map {
>   	bool frozen; /* write-once; write-protected by freeze_mutex */
>   	bool free_after_mult_rcu_gp;
>   	bool free_after_rcu_gp;
> +	u32 map_subtype; /* defined per map type */
>   	atomic64_t sleepable_refcnt;
>   	s64 __percpu *elem_count;
>   	u64 cookie; /* write-once */
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 766695491bc5..60bbd32e793a 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -20293,6 +20293,12 @@ static int check_indirect_jump(struct bpf_verifier_env *env, struct bpf_insn *in
>   		return -EINVAL;
>   	}
>   
> +	/*
> +	 * Explicitly mark this map as a jump table such that it can be
> +	 * distinguished later from other instruction arrays
> +	 */
> +	map->map_subtype = BPF_INSN_ARRAY_JUMP_TABLE;
> +
>   	for (i = 0; i < n - 1; i++) {
>   		other_branch = push_stack(env, env->gotox_tmp_buf->items[i],
>   					  env->insn_idx, env->cur_state->speculative);
> 

Looks good to me. Would you like to submit it as a separate patch, or
shall I include it in my patch with your SOB?


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH bpf-next] bpf: arm64: Fix panic due to missing BTI at indirect jump targets
  2025-12-04  6:47     ` Xu Kuohai
@ 2025-12-04  8:09       ` Anton Protopopov
  0 siblings, 0 replies; 6+ messages in thread
From: Anton Protopopov @ 2025-12-04  8:09 UTC (permalink / raw)
  To: Xu Kuohai
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Yonghong Song, Puranjay Mohan,
	Catalin Marinas, Will Deacon

On 25/12/04 02:47PM, Xu Kuohai wrote:
> On 11/28/2025 12:46 AM, Anton Protopopov wrote:
> 
> [...]
> 
> > > > diff --git a/kernel/bpf/bpf_insn_array.c b/kernel/bpf/bpf_insn_array.c
> > > > index 61ce52882632..ed20b186a1f5 100644
> > > > --- a/kernel/bpf/bpf_insn_array.c
> > > > +++ b/kernel/bpf/bpf_insn_array.c
> > > > @@ -299,3 +299,46 @@ void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
> > > >   		}
> > > >   	}
> > > >   }
> > > > +
> > > > +bool bpf_prog_has_insn_array(const struct bpf_prog *prog)
> > > > +{
> > > > +	int i;
> > > > +
> > > > +	for (i = 0; i < prog->aux->used_map_cnt; i++) {
> > > > +		if (is_insn_array(prog->aux->used_maps[i]))
> > > > +			return true;
> > > > +	}
> > > > +	return false;
> > > > +}
> > > 
> > > I think a different check is needed here (and a different function
> > > name, smth like "bpf_prog_has_indirect_jumps"), and a different
> > > algorithm to collect jump targets in the chunk below. A program can
> > > have instruction arrays not related to indirect jumps (see, e.g.,
> > > bpf_insn_array selftests + in future insns arrays will be used to
> > > also support other functionality). As an extreme case, an insn array
> > > can point to every instruction in a prog, thus a BTI will be
> > > generated for every instruction.
> > > 
> > > In verifier it is used a bit differently, namely, all insn arrays for
> > > a given subprog are collected when an indirect jump is encountered
> > > (and non-deterministic only in check_cfg). Later in verification, an
> > > exact map is used, so this is not a problem.
> > > 
> > > Initially I wanted to have a map flag (in map_extra) to distinguish between
> > > different types of instruction arrays ("plain ones", "jump targets",
> > > "call targets", "static keys"), but Andrii wasn't happy with it,
> > > so eventually I've dropped it. Maybe it is worth adding it until
> > > the code is merged to upstream? Eduard, Alexei, wdyt?
> > 
> > Actually, this is even better to mark a map as containing indirect
> > jump targets in check_indirect_jump(). This will be the most precise
> > set of targets, and won't require any userspace-visible changes/flags.
> > 
> > Something like this:
> > 
> > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > index 6498be4c44f8..c2d708213330 100644
> > --- a/include/linux/bpf.h
> > +++ b/include/linux/bpf.h
> > @@ -292,6 +292,10 @@ struct bpf_map_owner {
> >   	enum bpf_attach_type expected_attach_type;
> >   };
> > +/* map_subtype values for map_type BPF_MAP_TYPE_INSN_ARRAY */
> > +#define BPF_INSN_ARRAY_VOID		0
> > +#define BPF_INSN_ARRAY_JUMP_TABLE	1
> > +
> >   struct bpf_map {
> >   	u8 sha[SHA256_DIGEST_SIZE];
> >   	const struct bpf_map_ops *ops;
> > @@ -331,6 +335,7 @@ struct bpf_map {
> >   	bool frozen; /* write-once; write-protected by freeze_mutex */
> >   	bool free_after_mult_rcu_gp;
> >   	bool free_after_rcu_gp;
> > +	u32 map_subtype; /* defined per map type */
> >   	atomic64_t sleepable_refcnt;
> >   	s64 __percpu *elem_count;
> >   	u64 cookie; /* write-once */
> > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > index 766695491bc5..60bbd32e793a 100644
> > --- a/kernel/bpf/verifier.c
> > +++ b/kernel/bpf/verifier.c
> > @@ -20293,6 +20293,12 @@ static int check_indirect_jump(struct bpf_verifier_env *env, struct bpf_insn *in
> >   		return -EINVAL;
> >   	}
> > +	/*
> > +	 * Explicitly mark this map as a jump table such that it can be
> > +	 * distinguished later from other instruction arrays
> > +	 */
> > +	map->map_subtype = BPF_INSN_ARRAY_JUMP_TABLE;
> > +
> >   	for (i = 0; i < n - 1; i++) {
> >   		other_branch = push_stack(env, env->gotox_tmp_buf->items[i],
> >   					  env->insn_idx, env->cur_state->speculative);
> > 
> 
> Looks good to me. Would you like to submit it as a separate patch, or
> shall I include it in my patch with your SOB?

Ok if you just include it, makes sense as part of your changes.

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2025-12-04  8:02 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-11-27 14:03 [PATCH bpf-next] bpf: arm64: Fix panic due to missing BTI at indirect jump targets Xu Kuohai
2025-11-27 16:11 ` Anton Protopopov
2025-11-27 16:46   ` Anton Protopopov
2025-12-04  6:47     ` Xu Kuohai
2025-12-04  8:09       ` Anton Protopopov
2025-11-28  7:16   ` Xu Kuohai

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox