From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
To: Michael Ellerman <mpe@ellerman.id.au>,
Daniel Borkmann <daniel@iogearbox.net>,
Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: ykaliuta@redhat.com, johan.almbladh@anyfinetworks.com,
linuxppc-dev@lists.ozlabs.org, song@kernel.org,
bpf@vger.kernel.org, Jiri Olsa <jolsa@redhat.com>,
Hari Bathini <hbathini@linux.ibm.com>
Subject: [PATCH 12/13] powerpc64/bpf elfv1: Do not load TOC before calling functions
Date: Thu, 6 Jan 2022 17:15:16 +0530
Message-ID: <f8b4c430850c369c85d28d5bb80596521d137740.1641468127.git.naveen.n.rao@linux.vnet.ibm.com>
In-Reply-To: <cover.1641468127.git.naveen.n.rao@linux.vnet.ibm.com>
BPF helpers always reside in the core kernel, and all BPF programs use
the kernel TOC. As such, there is no need to load the TOC before calling
helpers or other BPF functions. Drop the code that does so.

Add a check to ensure we don't proceed if this assumption ever changes
in the future.
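
For context, the TOC load being removed operated on the function
descriptor that an ELFv1 function pointer refers to. A rough sketch of
its layout (field names here are illustrative, not the kernel's own):

	/*
	 * ELFv1 (ppc64 big-endian): a function pointer points at a
	 * descriptor like this, not at the code itself.
	 */
	struct func_desc_sketch {
		unsigned long entry;	/* +0:  actual entry point    */
		unsigned long toc;	/* +8:  callee's TOC pointer  */
		unsigned long env;	/* +16: environment pointer   */
	};

Since all core kernel functions share a single TOC, the value at offset
8 always matches the kernel TOC that is already live in r2 when a BPF
program runs, so reloading it is unnecessary.
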
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
arch/powerpc/net/bpf_jit.h | 2 +-
arch/powerpc/net/bpf_jit_comp.c | 4 +++-
arch/powerpc/net/bpf_jit_comp32.c | 8 +++++--
arch/powerpc/net/bpf_jit_comp64.c | 39 ++++++++++++++++---------------
4 files changed, 30 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 3b5c44c0b6638d..5cb3efd76715a9 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -181,7 +181,7 @@ static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
ctx->seen &= ~(1 << (31 - i));
}
-void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
+int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
u32 *addrs, int pass);
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 141e64585b6458..635f7448ff7952 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -59,7 +59,9 @@ static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
*/
tmp_idx = ctx->idx;
ctx->idx = addrs[i] / 4;
- bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ if (ret)
+ return ret;
/*
* Restore ctx->idx here. This is safe as the length
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 8c918db4c2c486..ce753aca5b3321 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -185,7 +185,7 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_BLR());
}
-void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
+int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
s32 rel = (s32)func - (s32)(image + ctx->idx);
@@ -201,6 +201,8 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
EMIT(PPC_RAW_MTCTR(_R0));
EMIT(PPC_RAW_BCTRL());
}
+
+ return 0;
}
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
@@ -953,7 +955,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), _R1, 12));
}
- bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ if (ret)
+ return ret;
EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, _R3));
EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), _R4));
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index e05b577d95bf11..5da8e54d4d70b6 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -152,9 +152,13 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_BLR());
}
-static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
- u64 func)
+static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
{
+ unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
+
+ if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
+ return -EINVAL;
+
#ifdef PPC64_ELF_ABI_v1
/* func points to the function descriptor */
PPC_LI64(b2p[TMP_REG_2], func);
@@ -162,25 +166,23 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
/* ... and move it to CTR */
EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
- /*
- * Load TOC from function descriptor at offset 8.
- * We can clobber r2 since we get called through a
- * function pointer (so caller will save/restore r2)
- * and since we don't use a TOC ourself.
- */
- PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
/* We can clobber r12 */
PPC_FUNC_ADDR(12, func);
EMIT(PPC_RAW_MTCTR(12));
#endif
EMIT(PPC_RAW_BCTRL());
+
+ return 0;
}
-void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
+int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
unsigned int i, ctx_idx = ctx->idx;
+ if (WARN_ON_ONCE(func && is_module_text_address(func)))
+ return -EINVAL;
+
/* Load function address into r12 */
PPC_LI64(12, func);
@@ -198,19 +200,14 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
EMIT(PPC_RAW_NOP());
#ifdef PPC64_ELF_ABI_v1
- /*
- * Load TOC from function descriptor at offset 8.
- * We can clobber r2 since we get called through a
- * function pointer (so caller will save/restore r2)
- * and since we don't use a TOC ourself.
- */
- PPC_BPF_LL(2, 12, 8);
/* Load actual entry point from function descriptor */
PPC_BPF_LL(12, 12, 0);
#endif
EMIT(PPC_RAW_MTCTR(12));
EMIT(PPC_RAW_BCTRL());
+
+ return 0;
}
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
@@ -896,9 +893,13 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
return ret;
if (func_addr_fixed)
- bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
else
- bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+
+ if (ret)
+ return ret;
+
/* move return value from r3 to BPF_REG_0 */
EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
break;
--
2.34.1