BPF List
 help / color / mirror / Atom feed
From: Yonghong Song <yonghong.song@linux.dev>
To: bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
	Andrii Nakryiko <andrii@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	"Jose E . Marchesi" <jose.marchesi@oracle.com>,
	kernel-team@fb.com, Martin KaFai Lau <martin.lau@kernel.org>,
	Puranjay Mohan <puranjay@kernel.org>
Subject: [PATCH bpf-next v2 22/23] bpf, arm64: Add JIT support for stack arguments
Date: Thu,  7 May 2026 14:32:01 -0700	[thread overview]
Message-ID: <20260507213201.1140284-1-yonghong.song@linux.dev> (raw)
In-Reply-To: <20260507212942.1122000-1-yonghong.song@linux.dev>

From: Puranjay Mohan <puranjay@kernel.org>

Implement stack argument passing for BPF-to-BPF and kfunc calls with
more than 5 parameters on arm64, following the AAPCS64 calling
convention.

BPF registers R1-R5 already map to x0-x4. With BPF_REG_0 moved to x8 by
the previous commit, x5-x7 are free to carry arguments 6-8. Arguments
9-12 spill onto the stack at [SP+0], [SP+8], ... and the callee reads
them from [FP+16], [FP+24], ... (above the saved FP/LR pair).

The BPF convention uses fixed offsets from BPF_REG_PARAMS (r11): off=-8
is always arg 6, off=-16 is arg 7, and so on. The verifier invalidates
all outgoing stack arg slots after each call, so the compiler must
re-store them before every call. This means x5-x7 don't need to be
saved on the stack.

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
---
 arch/arm64/net/bpf_jit_comp.c | 87 ++++++++++++++++++++++++++++++++++-
 1 file changed, 86 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 085e650662e3..9b9d2501d2d3 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -86,6 +86,7 @@ struct jit_ctx {
 	__le32 *image;
 	__le32 *ro_image;
 	u32 stack_size;
+	u16 stack_arg_size;
 	u64 user_vm_start;
 	u64 arena_vm_start;
 	bool fp_used;
@@ -533,13 +534,19 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 	 *                        |     |
 	 *                        +-----+ <= (BPF_FP - prog->aux->stack_depth)
 	 *                        |RSVD | padding
-	 * current A64_SP =>      +-----+ <= (BPF_FP - ctx->stack_size)
+	 *                        +-----+ <= (BPF_FP - ctx->stack_size)
+	 *                        |     |
+	 *                        | ... | outgoing stack args (9+, if any)
+	 *                        |     |
+	 * current A64_SP =>      +-----+
 	 *                        |     |
 	 *                        | ... | Function call stack
 	 *                        |     |
 	 *                        +-----+
 	 *                          low
 	 *
+	 * Stack args 6-8 are passed in x5-x7, args 9+ at [SP].
+	 * Incoming args 9+ are at [FP + 16], [FP + 24], ...
 	 */
 
 	emit_kcfi(is_main_prog ? cfi_bpf_hash : cfi_bpf_subprog_hash, ctx);
@@ -613,6 +620,9 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 	if (ctx->stack_size && !ctx->priv_sp_used)
 		emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
 
+	if (ctx->stack_arg_size)
+		emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_arg_size), ctx);
+
 	if (ctx->arena_vm_start)
 		emit_a64_mov_i64(arena_vm_base, ctx->arena_vm_start, ctx);
 
@@ -673,6 +683,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	/* Update tail_call_cnt if the slot is populated. */
 	emit(A64_STR64I(tcc, ptr, 0), ctx);
 
+	if (ctx->stack_arg_size)
+		emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_arg_size), ctx);
+
 	/* restore SP */
 	if (ctx->stack_size && !ctx->priv_sp_used)
 		emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
@@ -1034,6 +1047,9 @@ static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
 	const u8 r0 = bpf2a64[BPF_REG_0];
 	const u8 ptr = bpf2a64[TCCNT_PTR];
 
+	if (ctx->stack_arg_size)
+		emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_arg_size), ctx);
+
 	/* We're done with BPF stack */
 	if (ctx->stack_size && !ctx->priv_sp_used)
 		emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
@@ -1191,6 +1207,41 @@ static int add_exception_handler(const struct bpf_insn *insn,
 	return 0;
 }
 
+static const u8 stack_arg_reg[] = { A64_R(5), A64_R(6), A64_R(7) };
+
+#define NR_STACK_ARG_REGS	ARRAY_SIZE(stack_arg_reg)
+
+static void emit_stack_arg_load(u8 dst, s16 bpf_off, struct jit_ctx *ctx)
+{
+	int idx = bpf_off / sizeof(u64) - 1;
+
+	if (idx < NR_STACK_ARG_REGS)
+		emit(A64_MOV(1, dst, stack_arg_reg[idx]), ctx);
+	else
+		emit(A64_LDR64I(dst, A64_FP, (idx - NR_STACK_ARG_REGS) * sizeof(u64) + 16), ctx);
+}
+
+static void emit_stack_arg_store(u8 src_a64, s16 bpf_off, struct jit_ctx *ctx)
+{
+	int idx = -bpf_off / sizeof(u64) - 1;
+
+	if (idx < NR_STACK_ARG_REGS)
+		emit(A64_MOV(1, stack_arg_reg[idx], src_a64), ctx);
+	else
+		emit(A64_STR64I(src_a64, A64_SP, (idx - NR_STACK_ARG_REGS) * sizeof(u64)), ctx);
+}
+
+static void emit_stack_arg_store_imm(s32 imm, s16 bpf_off, const u8 tmp, struct jit_ctx *ctx)
+{
+	int idx = -bpf_off / sizeof(u64) - 1;
+
+	emit_a64_mov_i(1, tmp, imm, ctx);
+	if (idx < NR_STACK_ARG_REGS)
+		emit(A64_MOV(1, stack_arg_reg[idx], tmp), ctx);
+	else
+		emit(A64_STR64I(tmp, A64_SP, (idx - NR_STACK_ARG_REGS) * sizeof(u64)), ctx);
+}
+
 /* JITs an eBPF instruction.
  * Returns:
  * 0  - successfully JITed an 8-byte eBPF instruction.
@@ -1646,6 +1697,11 @@ static int build_insn(const struct bpf_verifier_env *env, const struct bpf_insn
 	case BPF_LDX | BPF_MEM | BPF_H:
 	case BPF_LDX | BPF_MEM | BPF_B:
 	case BPF_LDX | BPF_MEM | BPF_DW:
+		if (insn->src_reg == BPF_REG_PARAMS) {
+			emit_stack_arg_load(dst, off, ctx);
+			break;
+		}
+		fallthrough;
 	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
 	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
 	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
@@ -1672,6 +1728,8 @@ static int build_insn(const struct bpf_verifier_env *env, const struct bpf_insn
 		if (src == fp) {
 			src_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
 			off_adj = off + ctx->stack_size;
+			if (!ctx->priv_sp_used)
+				off_adj += ctx->stack_arg_size;
 		} else {
 			src_adj = src;
 			off_adj = off;
@@ -1752,6 +1810,11 @@ static int build_insn(const struct bpf_verifier_env *env, const struct bpf_insn
 	case BPF_ST | BPF_MEM | BPF_H:
 	case BPF_ST | BPF_MEM | BPF_B:
 	case BPF_ST | BPF_MEM | BPF_DW:
+		if (insn->dst_reg == BPF_REG_PARAMS) {
+			emit_stack_arg_store_imm(imm, off, tmp, ctx);
+			break;
+		}
+		fallthrough;
 	case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
 	case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
 	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
@@ -1763,6 +1826,8 @@ static int build_insn(const struct bpf_verifier_env *env, const struct bpf_insn
 		if (dst == fp) {
 			dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
 			off_adj = off + ctx->stack_size;
+			if (!ctx->priv_sp_used)
+				off_adj += ctx->stack_arg_size;
 		} else {
 			dst_adj = dst;
 			off_adj = off;
@@ -1814,6 +1879,11 @@ static int build_insn(const struct bpf_verifier_env *env, const struct bpf_insn
 	case BPF_STX | BPF_MEM | BPF_H:
 	case BPF_STX | BPF_MEM | BPF_B:
 	case BPF_STX | BPF_MEM | BPF_DW:
+		if (insn->dst_reg == BPF_REG_PARAMS) {
+			emit_stack_arg_store(src, off, ctx);
+			break;
+		}
+		fallthrough;
 	case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
 	case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
 	case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
@@ -1825,6 +1895,8 @@ static int build_insn(const struct bpf_verifier_env *env, const struct bpf_insn
 		if (dst == fp) {
 			dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
 			off_adj = off + ctx->stack_size;
+			if (!ctx->priv_sp_used)
+				off_adj += ctx->stack_arg_size;
 		} else {
 			dst_adj = dst;
 			off_adj = off;
@@ -2065,6 +2137,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_pr
 	ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
 	ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
 
+	if (prog->aux->stack_arg_cnt > prog->aux->incoming_stack_arg_cnt) {
+		u16 outgoing_cnt = prog->aux->stack_arg_cnt - prog->aux->incoming_stack_arg_cnt;
+		int nr_on_stack = outgoing_cnt - NR_STACK_ARG_REGS;
+
+		if (nr_on_stack > 0)
+			ctx.stack_arg_size = round_up(nr_on_stack * sizeof(u64), 16);
+	}
+
 	if (priv_stack_ptr)
 		ctx.priv_sp_used = true;
 
@@ -2229,6 +2309,11 @@ bool bpf_jit_supports_kfunc_call(void)
 	return true;
 }
 
+bool bpf_jit_supports_stack_args(void)
+{
+	return true;
+}
+
 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
 {
 	if (!aarch64_insn_copy(dst, src, len))
-- 
2.53.0-Meta


  parent reply	other threads:[~2026-05-07 21:32 UTC|newest]

Thread overview: 68+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-05-07 21:29 [PATCH bpf-next v2 00/23] bpf: Support stack arguments for BPF functions and kfuncs Yonghong Song
2026-05-07 21:29 ` [PATCH bpf-next v2 01/23] bpf: Convert bpf_get_spilled_reg macro to static inline function Yonghong Song
2026-05-07 21:29 ` [PATCH bpf-next v2 02/23] bpf: Remove copy_register_state wrapper function Yonghong Song
2026-05-07 21:29 ` [PATCH bpf-next v2 03/23] bpf: Add helper functions for r11-based stack argument insns Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 04/23] bpf: Set sub->arg_cnt earlier in btf_prepare_func_args() Yonghong Song
2026-05-07 22:11   ` bot+bpf-ci
2026-05-09 13:05     ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 05/23] bpf: Support stack arguments for bpf functions Yonghong Song
2026-05-07 22:26   ` bot+bpf-ci
2026-05-09 12:52     ` Yonghong Song
2026-05-08 18:00   ` Alexei Starovoitov
2026-05-09 12:55     ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 06/23] bpf: Refactor jmp history to use dedicated spi/frame fields Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 07/23] bpf: Add precision marking and backtracking for stack argument slots Yonghong Song
2026-05-07 22:11   ` bot+bpf-ci
2026-05-09 13:08     ` Yonghong Song
2026-05-09  4:05   ` sashiko-bot
2026-05-10 16:41     ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 08/23] bpf: Refactor record_call_access() to extract per-arg logic Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 09/23] bpf: Extend liveness analysis to track stack argument slots Yonghong Song
2026-05-07 22:11   ` bot+bpf-ci
2026-05-09 13:29     ` Yonghong Song
2026-05-09  0:59   ` sashiko-bot
2026-05-10 16:47     ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 10/23] bpf: Reject stack arguments in non-JITed programs Yonghong Song
2026-05-07 22:11   ` bot+bpf-ci
2026-05-09  2:10   ` sashiko-bot
2026-05-10 16:59     ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 11/23] bpf: Prepare architecture JIT support for stack arguments Yonghong Song
2026-05-09  2:19   ` sashiko-bot
2026-05-10 17:05     ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 12/23] bpf: Enable r11 based insns Yonghong Song
2026-05-09  2:59   ` sashiko-bot
2026-05-10 17:11     ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 13/23] bpf: Support stack arguments for kfunc calls Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 14/23] bpf: Reject stack arguments if tail call reachable Yonghong Song
2026-05-07 22:11   ` bot+bpf-ci
2026-05-09  1:42   ` sashiko-bot
2026-05-10 17:15     ` Yonghong Song
2026-05-07 21:30 ` [PATCH bpf-next v2 15/23] bpf,x86: Implement JIT support for stack arguments Yonghong Song
2026-05-07 22:26   ` bot+bpf-ci
2026-05-10 17:21     ` Yonghong Song
2026-05-09  2:21   ` sashiko-bot
2026-05-10 17:22     ` Yonghong Song
2026-05-07 21:31 ` [PATCH bpf-next v2 16/23] selftests/bpf: Add tests for BPF function " Yonghong Song
2026-05-07 21:31 ` [PATCH bpf-next v2 17/23] selftests/bpf: Add tests for stack argument validation Yonghong Song
2026-05-09  1:30   ` sashiko-bot
2026-05-10 17:23     ` Yonghong Song
2026-05-07 21:31 ` [PATCH bpf-next v2 18/23] selftests/bpf: Add BTF fixup for __naked subprog parameter names Yonghong Song
2026-05-09  1:40   ` sashiko-bot
2026-05-10 17:24     ` Yonghong Song
2026-05-07 21:31 ` [PATCH bpf-next v2 19/23] selftests/bpf: Add verifier tests for stack argument validation Yonghong Song
2026-05-07 22:11   ` bot+bpf-ci
2026-05-10 17:27     ` Yonghong Song
2026-05-09  1:38   ` sashiko-bot
2026-05-10 17:27     ` Yonghong Song
2026-05-07 21:31 ` [PATCH bpf-next v2 20/23] selftests/bpf: Add precision backtracking test for stack arguments Yonghong Song
2026-05-09  1:52   ` sashiko-bot
2026-05-10 17:31     ` Yonghong Song
2026-05-07 21:31 ` [PATCH bpf-next v2 21/23] bpf, arm64: Map BPF_REG_0 to x8 instead of x7 Yonghong Song
2026-05-08 18:01   ` Alexei Starovoitov
2026-05-09 13:44     ` Yonghong Song
2026-05-07 21:32 ` Yonghong Song [this message]
2026-05-09  2:15   ` [PATCH bpf-next v2 22/23] bpf, arm64: Add JIT support for stack arguments sashiko-bot
2026-05-10 17:32     ` Yonghong Song
2026-05-07 21:32 ` [PATCH bpf-next v2 23/23] selftests/bpf: Enable stack argument tests for arm64 Yonghong Song
2026-05-08 18:06 ` [PATCH bpf-next v2 00/23] bpf: Support stack arguments for BPF functions and kfuncs Alexei Starovoitov
2026-05-09 13:43   ` Yonghong Song

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260507213201.1140284-1-yonghong.song@linux.dev \
    --to=yonghong.song@linux.dev \
    --cc=andrii@kernel.org \
    --cc=ast@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=jose.marchesi@oracle.com \
    --cc=kernel-team@fb.com \
    --cc=martin.lau@kernel.org \
    --cc=puranjay@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox