From: adubey@linux.ibm.com
To: bpf@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
linux-kselftest@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: hbathini@linux.ibm.com, sachinpb@linux.ibm.com,
venkat88@linux.ibm.com, andrii@kernel.org, eddyz87@gmail.com,
mykolal@fb.com, ast@kernel.org, daniel@iogearbox.net,
martin.lau@linux.dev, song@kernel.org, yonghong.song@linux.dev,
john.fastabend@gmail.com, kpsingh@kernel.org, sdf@fomichev.me,
haoluo@google.com, jolsa@kernel.org, christophe.leroy@csgroup.eu,
naveen@kernel.org, maddy@linux.ibm.com, mpe@ellerman.id.au,
npiggin@gmail.com, memxor@gmail.com, iii@linux.ibm.com,
shuah@kernel.org
Subject: [PATCH v4 5/6] powerpc64/bpf: Support exceptions
Date: Fri, 23 Jan 2026 02:48:53 +0530 [thread overview]
Message-ID: <20260122211854.5508-6-adubey@linux.ibm.com> (raw)
In-Reply-To: <20260122211854.5508-1-adubey@linux.ibm.com>
From: Abhishek Dubey <adubey@linux.ibm.com>
The modified prologue/epilogue generation code now
enables exception-callback to use the stack frame of
the program marked as exception boundary, where callee
saved registers are stored.
As per ppc64 ABIv2 documentation[1], r14-r31 are callee
saved registers. BPF programs on ppc64 already save
the r26-r31 registers. Saving the remaining set of callee
saved registers (r14-r25) is handled in the next patch.
[1] https://ftp.rtems.org/pub/rtems/people/sebh/ABI64BitOpenPOWERv1.1_16July2015_pub.pdf
Signed-off-by: Abhishek Dubey <adubey@linux.ibm.com>
---
arch/powerpc/net/bpf_jit.h | 2 ++
arch/powerpc/net/bpf_jit_comp.c | 7 ++++
arch/powerpc/net/bpf_jit_comp64.c | 58 +++++++++++++++++++++----------
3 files changed, 48 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 56f56fdd4969..82bbf63f0e57 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -179,6 +179,8 @@ struct codegen_context {
u64 arena_vm_start;
u64 user_vm_start;
bool is_subprog;
+ bool exception_boundary;
+ bool exception_cb;
};
#define bpf_to_ppc(r) (ctx->b2p[r])
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 1a305f0fed27..2607ea0bedef 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -207,6 +207,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
cgctx.is_subprog = bpf_is_subprog(fp);
+ cgctx.exception_boundary = fp->aux->exception_boundary;
+ cgctx.exception_cb = fp->aux->exception_cb;
/* Scouting faux-generate pass 0 */
if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
@@ -436,6 +438,11 @@ void bpf_jit_free(struct bpf_prog *fp)
bpf_prog_unlock_free(fp);
}
+bool bpf_jit_supports_exceptions(void)
+{
+ return IS_ENABLED(CONFIG_PPC64);
+}
+
bool bpf_jit_supports_subprog_tailcalls(void)
{
return IS_ENABLED(CONFIG_PPC64);
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index c25ba1ad587a..d7cd8ab6559c 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -89,7 +89,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
* - the bpf program uses its stack area
* The latter condition is deduced from the usage of BPF_REG_FP
*/
- return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
+ return ctx->seen & SEEN_FUNC ||
+ bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)) ||
+ ctx->exception_cb;
}
/*
@@ -161,8 +163,13 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
/* this goes in the redzone */
EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
- } else {
+ } else if (!ctx->exception_cb) {
/*
+ * Tailcall jitting for non exception_cb progs only.
+ * exception_cb won't require tail_call_info to be setup.
+ *
+ * tail_call_info interpretation logic:
+ *
* if tail_call_info < MAX_TAIL_CALL_CNT
* main prog calling first subprog -> copy reference
* else
@@ -177,8 +184,12 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
}
- if (bpf_has_stack_frame(ctx)) {
+ if (bpf_has_stack_frame(ctx) && !ctx->exception_cb) {
/*
+ * exception_cb uses boundary frame after stack walk.
+ * It can simply use redzone, this optimization reduces
+ * stack walk loop by one level.
+ *
* We need a stack frame, but we don't necessarily need to
* save/restore LR unless we call other functions
*/
@@ -190,23 +201,32 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
}
- /*
- * Back up non-volatile regs -- BPF registers 6-10
- * If we haven't created our own stack frame, we save these
- * in the protected zone below the previous stack frame
- */
- for (i = BPF_REG_6; i <= BPF_REG_10; i++)
- if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
- EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
+ if (!ctx->exception_cb) {
+ /*
+ * Back up non-volatile regs -- BPF registers 6-10
+ * If we haven't created our own stack frame, we save these
+ * in the protected zone below the previous stack frame
+ */
+ for (i = BPF_REG_6; i <= BPF_REG_10; i++)
+ if (ctx->exception_boundary || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+ EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1,
+ bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
- if (ctx->arena_vm_start)
- EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
+ if (ctx->exception_boundary || ctx->arena_vm_start)
+ EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
- /* Setup frame pointer to point to the bpf stack area */
- if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
- EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
+ /* Setup frame pointer to point to the bpf stack area */
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
STACK_FRAME_MIN_SIZE + ctx->stack_size));
+ } else {
+ /*
+ * Exception callback receives Frame Pointer of main
+ * program as third arg
+ */
+ EMIT(PPC_RAW_MR(_R1, _R5));
+ }
if (ctx->arena_vm_start)
PPC_LI64(bpf_to_ppc(ARENA_VM_START), ctx->arena_vm_start);
@@ -218,17 +238,17 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
/* Restore NVRs */
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
- if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+ if (ctx->exception_cb || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
- if (ctx->arena_vm_start)
+ if (ctx->exception_cb || ctx->arena_vm_start)
EMIT(PPC_RAW_LD(bpf_to_ppc(ARENA_VM_START), _R1,
bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
/* Tear down our stack frame */
if (bpf_has_stack_frame(ctx)) {
EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
- if (ctx->seen & SEEN_FUNC) {
+ if (ctx->seen & SEEN_FUNC || ctx->exception_cb) {
EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
EMIT(PPC_RAW_MTLR(_R0));
}
--
2.48.1
next prev parent reply other threads:[~2026-01-22 21:21 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-22 21:18 [PATCH v4 0/6] powerpc64/bpf: Support tailcalls with subprogs & BPF exceptions adubey
2026-01-22 21:18 ` [PATCH v4 1/6] powerpc64/bpf: Moving tail_call_cnt to bottom of frame adubey
2026-01-23 12:45 ` Hari Bathini
2026-01-22 21:18 ` [PATCH v4 2/6] powerpc64/bpf: Support tailcalls with subprogs adubey
2026-01-23 12:48 ` Hari Bathini
2026-01-22 21:18 ` [PATCH v4 3/6] powerpc64/bpf: Avoid tailcall restore from trampoline adubey
2026-01-22 21:18 ` [PATCH v4 4/6] powerpc64/bpf: Add arch_bpf_stack_walk() for BPF JIT adubey
2026-01-23 12:51 ` Hari Bathini
2026-01-22 21:18 ` adubey [this message]
2026-01-23 12:54 ` [PATCH v4 5/6] powerpc64/bpf: Support exceptions Hari Bathini
[not found] ` <9f35f6799b0b27866259582a2eefecb3@imap.linux.ibm.com>
2026-01-23 18:13 ` Hari Bathini
2026-01-22 21:18 ` [PATCH v4 6/6] powerpc64/bpf: Additional NVR handling for bpf_throw adubey
2026-01-22 21:40 ` bot+bpf-ci
2026-01-23 13:17 ` Hari Bathini
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260122211854.5508-6-adubey@linux.ibm.com \
--to=adubey@linux.ibm.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=christophe.leroy@csgroup.eu \
--cc=daniel@iogearbox.net \
--cc=eddyz87@gmail.com \
--cc=haoluo@google.com \
--cc=hbathini@linux.ibm.com \
--cc=iii@linux.ibm.com \
--cc=john.fastabend@gmail.com \
--cc=jolsa@kernel.org \
--cc=kpsingh@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=maddy@linux.ibm.com \
--cc=martin.lau@linux.dev \
--cc=memxor@gmail.com \
--cc=mpe@ellerman.id.au \
--cc=mykolal@fb.com \
--cc=naveen@kernel.org \
--cc=npiggin@gmail.com \
--cc=sachinpb@linux.ibm.com \
--cc=sdf@fomichev.me \
--cc=shuah@kernel.org \
--cc=song@kernel.org \
--cc=venkat88@linux.ibm.com \
--cc=yonghong.song@linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox