public inbox for live-patching@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] powerpc64/bpf: support direct_call on livepatch function
@ 2025-10-02 19:27 Hari Bathini
  2025-10-03 11:21 ` kernel test robot
  2025-10-06  7:52 ` Naveen N Rao
  0 siblings, 2 replies; 11+ messages in thread
From: Hari Bathini @ 2025-10-02 19:27 UTC (permalink / raw)
  To: Madhavan Srinivasan
  Cc: linuxppc-dev, Christophe Leroy, Naveen N. Rao, Michael Ellerman,
	Nicholas Piggin, bpf, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Song Liu, Jiri Olsa, Viktor Malik, live-patching,
	Josh Poimboeuf, Joe Lawrence, Jiri Kosina, linux-trace-kernel,
	Steven Rostedt, Masami Hiramatsu, Mark Rutland, Shung-Hsi Yu

Today, livepatch takes precedence over direct_call. Instead, save the
state and make direct_call before handling livepatch. This change
inadvertently skips livepatch stack restore, when an attached fmod_ret
program fails. To handle this scenario, set cr0.eq bit to indicate
livepatch is active while making the direct_call, save the expected
livepatch stack state on the trampoline stack and restore it, if and
when required, during do_fexit in the trampoline code.

Reported-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
Closes: https://lore.kernel.org/all/rwmwrvvtg3pd7qrnt3of6dideioohwhsplancoc2gdrjran7bg@j5tqng6loymr/
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
---
 arch/powerpc/include/asm/livepatch.h     | 15 +++++
 arch/powerpc/kernel/trace/ftrace_entry.S | 74 ++++++++++++++++++++----
 arch/powerpc/net/bpf_jit_comp.c          | 71 ++++++++++++++++++++++-
 3 files changed, 149 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index d044a1fd4f44..356c1eb46f5d 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -7,6 +7,20 @@
 #ifndef _ASM_POWERPC_LIVEPATCH_H
 #define _ASM_POWERPC_LIVEPATCH_H
 
+#ifdef CONFIG_LIVEPATCH_64
+#define LIVEPATCH_STACK_MAGIC_OFFSET	8
+#define LIVEPATCH_STACK_LR_OFFSET	16
+#define LIVEPATCH_STACK_TOC_OFFSET	24
+
+#if defined(CONFIG_PPC_FTRACE_OUT_OF_LINE) && defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS)
+#define LIVEPATCH_STACK_FRAME_SIZE	32	/* Allocate 4 x 8 bytes (to save new NIP as well) */
+#define LIVEPATCH_STACK_NIP_OFFSET	32
+#else
+#define LIVEPATCH_STACK_FRAME_SIZE	24	/* Allocate 3 x 8 bytes */
+#endif
+#endif
+
+#ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 
@@ -20,4 +34,5 @@ static inline void klp_init_thread_info(struct task_struct *p)
 static inline void klp_init_thread_info(struct task_struct *p) { }
 #endif
 
+#endif /* !__ASSEMBLY__ */
 #endif /* _ASM_POWERPC_LIVEPATCH_H */
diff --git a/arch/powerpc/kernel/trace/ftrace_entry.S b/arch/powerpc/kernel/trace/ftrace_entry.S
index 6599fe3c6234..b98f12f378b1 100644
--- a/arch/powerpc/kernel/trace/ftrace_entry.S
+++ b/arch/powerpc/kernel/trace/ftrace_entry.S
@@ -8,6 +8,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/ftrace.h>
+#include <asm/livepatch.h>
 #include <asm/ppc-opcode.h>
 #include <asm/thread_info.h>
 #include <asm/bug.h>
@@ -244,6 +245,8 @@
 	/* jump after _mcount site */
 #ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	/* For direct_call, set cr0.eq bit only if livepatch is active */
+	crclr	4*cr0+eq
 	bnectr	cr1
 #endif
 	/*
@@ -306,10 +309,14 @@ ftrace_no_trace:
 	mtctr	r12
 	REST_GPRS(11, 12, r1)
 	addi	r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
+	/* For direct_call, set cr0.eq bit only if livepatch is active */
+	crclr	4*cr0+eq
 	bctr
 .Lftrace_direct_call:
 	mtctr	r12
 	addi	r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
+	/* For direct_call, set cr0.eq bit only if livepatch is active */
+	crclr	4*cr0+eq
 	bctr
 SYM_FUNC_START(ftrace_stub_direct_tramp)
 	blr
@@ -340,25 +347,72 @@ SYM_FUNC_END(ftrace_stub_direct_tramp)
 livepatch_handler:
 	ld	r12, PACA_THREAD_INFO(r13)
 
-	/* Allocate 3 x 8 bytes */
 	ld	r11, TI_livepatch_sp(r12)
-	addi	r11, r11, 24
+	/* Allocate stack to save LR, TOC & optionally NIP (in case of direct_call) */
+	addi	r11, r11, LIVEPATCH_STACK_FRAME_SIZE
 	std	r11, TI_livepatch_sp(r12)
 
 	/* Store stack end marker */
 	lis     r12, STACK_END_MAGIC@h
 	ori     r12, r12, STACK_END_MAGIC@l
-	std	r12, -8(r11)
+	std	r12, -LIVEPATCH_STACK_MAGIC_OFFSET(r11)
 
 	/* Save toc & real LR on livepatch stack */
-	std	r2,  -24(r11)
+	std	r2,  -LIVEPATCH_STACK_TOC_OFFSET(r11)
 #ifndef CONFIG_PPC_FTRACE_OUT_OF_LINE
 	mflr	r12
-	std	r12, -16(r11)
+	std	r12, -LIVEPATCH_STACK_LR_OFFSET(r11)
 	mfctr	r12
 #else
-	std	r0, -16(r11)
+	std	r0, -LIVEPATCH_STACK_LR_OFFSET(r11)
 	mflr	r12
+
+	/* Also, save new NIP on livepatch stack before the direct_call */
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	std	r12, -LIVEPATCH_STACK_NIP_OFFSET(r11)
+
+	/* For direct_call, set cr0.eq bit to indicate livepatch is active */
+	crset	4*cr0+eq
+	/* Jump to the direct_call */
+	bnectrl	cr1
+
+	/*
+	 * The address to jump after direct call is deduced based on ftrace OOL stub sequence.
+	 * The seemingly insignificant couple of instructions below is to mimic that here to
+	 * jump back to the livepatch handler code below.
+	 */
+	nop
+	b	1f
+
+	/*
+	 * Restore the state for livepatching from the livepatch stack.
+	 * Before that, check if livepatch stack is intact. Use r0 for it.
+	 */
+1:	mtctr	r0
+	ld	r12, PACA_THREAD_INFO(r13)
+	ld	r11, TI_livepatch_sp(r12)
+	lis     r0,  STACK_END_MAGIC@h
+	ori     r0,  r0, STACK_END_MAGIC@l
+	ld	r12, -LIVEPATCH_STACK_MAGIC_OFFSET(r11)
+1:	tdne	r12, r0
+	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
+	mfctr	r0
+
+	/*
+	 * A change in r0 implies the direct_call is not done yet. The direct_call
+	 * will take care of calling the original LR. Update r0 in livepatch stack
+	 * with the new LR in the direct_call.
+	 */
+	ld	r12, -LIVEPATCH_STACK_LR_OFFSET(r11)
+	cmpd	r12, r0
+	beq	1f
+	mflr	r0
+	std	r0, -LIVEPATCH_STACK_LR_OFFSET(r11)
+
+	/* Put new NIP back in r12 to proceed with livepatch handling */
+1:	ld	r12, -LIVEPATCH_STACK_NIP_OFFSET(r11)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
 	/* Put ctr in r12 for global entry and branch there */
 	mtctr	r12
 #endif
@@ -377,18 +431,18 @@ livepatch_handler:
 	/* Check stack marker hasn't been trashed */
 	lis     r2,  STACK_END_MAGIC@h
 	ori     r2,  r2, STACK_END_MAGIC@l
-	ld	r12, -8(r11)
+	ld	r12, -LIVEPATCH_STACK_MAGIC_OFFSET(r11)
 1:	tdne	r12, r2
 	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
 
 	/* Restore LR & toc from livepatch stack */
-	ld	r12, -16(r11)
+	ld	r12, -LIVEPATCH_STACK_LR_OFFSET(r11)
 	mtlr	r12
-	ld	r2,  -24(r11)
+	ld	r2,  -LIVEPATCH_STACK_TOC_OFFSET(r11)
 
 	/* Pop livepatch stack frame */
 	ld	r12, PACA_THREAD_INFO(r13)
-	subi	r11, r11, 24
+	subi	r11, r11, LIVEPATCH_STACK_FRAME_SIZE
 	std	r11, TI_livepatch_sp(r12)
 
 	/* Return to original caller of live patched function */
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 88ad5ba7b87f..cc86867d85cd 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -19,6 +19,7 @@
 
 #include <asm/kprobes.h>
 #include <asm/text-patching.h>
+#include <asm/livepatch.h>
 
 #include "bpf_jit.h"
 
@@ -678,14 +679,16 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 					 struct bpf_tramp_links *tlinks,
 					 void *func_addr)
 {
-	int regs_off, nregs_off, ip_off, run_ctx_off, retval_off, nvr_off, alt_lr_off, r4_off = 0;
 	int i, ret, nr_regs, bpf_frame_size = 0, bpf_dummy_frame_size = 0, func_frame_offset;
 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+	int regs_off, nregs_off, ip_off, run_ctx_off, retval_off, nvr_off;
 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
+	int alt_lr_off, r4_off = 0, livepatch_sp_off = 0;
 	struct codegen_context codegen_ctx, *ctx;
 	u32 *image = (u32 *)rw_image;
 	ppc_inst_t branch_insn;
+	bool handle_lp = false;
 	u32 *branches = NULL;
 	bool save_ret;
 
@@ -716,6 +719,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	 * dummy frame for unwind       [ back chain 1      ] --
 	 *                              [ padding           ] align stack frame
 	 *       r4_off                 [ r4 (tailcallcnt)  ] optional - 32-bit powerpc
+	 *                              [ *current.TI.lp_sp ]
+	 *    livepatch_sp_off          [ current.TI.lp_sp  ] optional - livepatch stack info
 	 *       alt_lr_off             [ real lr (ool stub)] optional - actual lr
 	 *                              [ r26               ]
 	 *       nvr_off                [ r25               ] nvr save area
@@ -780,10 +785,20 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	nvr_off = bpf_frame_size;
 	bpf_frame_size += 2 * SZL;
 
+
 	/* Optional save area for actual LR in case of ool ftrace */
 	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
 		alt_lr_off = bpf_frame_size;
 		bpf_frame_size += SZL;
+		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS)) {
+			handle_lp = (func_ptr_is_kernel_text(func_addr) && fmod_ret->nr_links &&
+				     (flags & BPF_TRAMP_F_CALL_ORIG));
+		}
+	}
+
+	if (handle_lp) {
+		livepatch_sp_off = bpf_frame_size;
+		bpf_frame_size += 2 * SZL;
 	}
 
 	if (IS_ENABLED(CONFIG_PPC32)) {
@@ -822,6 +837,30 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	if (IS_ENABLED(CONFIG_PPC32) && nr_regs < 2)
 		EMIT(PPC_RAW_STL(_R4, _R1, r4_off));
 
+	/* Save expected livepatch stack state on the trampoline stack */
+	if (handle_lp) {
+		/*
+		 * The caller is expected to set cr0.eq bit, if livepatch was active on it.
+		 *
+		 * If livepatch is active, save address & the expected value of
+		 * livepatch stack pointer on the trampoline stack.
+		 * Else, set both of them to 0.
+		 */
+		PPC_BCC_SHORT(COND_EQ, (ctx->idx + 5) * 4);
+		EMIT(PPC_RAW_LI(_R12, 0));
+		EMIT(PPC_RAW_STL(_R12, _R1, livepatch_sp_off));
+		EMIT(PPC_RAW_STL(_R12, _R1, livepatch_sp_off + SZL));
+		PPC_JMP((ctx->idx + 7) * 4);
+
+		EMIT(PPC_RAW_LL(_R12, _R13, offsetof(struct paca_struct, __current) +
+					    offsetof(struct task_struct, thread_info)));
+		EMIT(PPC_RAW_ADDI(_R12, _R12, offsetof(struct thread_info, livepatch_sp)));
+		EMIT(PPC_RAW_STL(_R12, _R1, livepatch_sp_off));
+		EMIT(PPC_RAW_LL(_R12, _R12, 0));
+		EMIT(PPC_RAW_ADDI(_R12, _R12, -LIVEPATCH_STACK_FRAME_SIZE));
+		EMIT(PPC_RAW_STL(_R12, _R1, livepatch_sp_off + SZL));
+	}
+
 	bpf_trampoline_save_args(image, ctx, func_frame_offset, nr_regs, regs_off);
 
 	/* Save our return address */
@@ -932,6 +971,36 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 		image[branches[i]] = ppc_inst_val(branch_insn);
 	}
 
+	/*
+	 * Restore livepatch stack state if livepatch was active & an attached
+	 * fmod_ret program failed.
+	 */
+	if (handle_lp) {
+		EMIT(PPC_RAW_LL(_R12, _R1, livepatch_sp_off + SZL));
+		EMIT(PPC_RAW_CMPLI(_R12, 0));
+
+		/*
+		 * If expected value (_R12) of livepatch stack pointer saved on the
+		 * trampoline stack is 0, livepatch was not active. Skip the rest.
+		 */
+		PPC_BCC_SHORT(COND_EQ, (ctx->idx + 7) * 4);
+
+		EMIT(PPC_RAW_LL(_R25, _R1, livepatch_sp_off));
+		EMIT(PPC_RAW_LL(_R25, _R25, 0));
+
+		/*
+		 * If the expected value (_R12) of livepatch stack pointer saved on the
+		 * trampoline stack is not the same as actual value (_R25), it implies
+		 * fmod_ret program failed and skipped calling the traced/livepatch'ed
+		 * function. The livepatch'ed function did not get a chance to tear down
+		 * the livepatch stack it setup. Take care of that here in do_fexit.
+		 */
+		EMIT(PPC_RAW_CMPD(_R12, _R25));
+		PPC_BCC_SHORT(COND_EQ, (ctx->idx + 3) * 4);
+		EMIT(PPC_RAW_LL(_R25, _R1, livepatch_sp_off));
+		EMIT(PPC_RAW_STL(_R12, _R25, 0));
+	}
+
 	for (i = 0; i < fexit->nr_links; i++)
 		if (invoke_bpf_prog(image, ro_image, ctx, fexit->links[i], regs_off, retval_off,
 				    run_ctx_off, false)) {
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH] powerpc64/bpf: support direct_call on livepatch function
  2025-10-02 19:27 [PATCH] powerpc64/bpf: support direct_call on livepatch function Hari Bathini
@ 2025-10-03 11:21 ` kernel test robot
  2025-10-06  7:52 ` Naveen N Rao
  1 sibling, 0 replies; 11+ messages in thread
From: kernel test robot @ 2025-10-03 11:21 UTC (permalink / raw)
  To: Hari Bathini, Madhavan Srinivasan
  Cc: oe-kbuild-all, linuxppc-dev, Christophe Leroy, Naveen N. Rao,
	Michael Ellerman, Nicholas Piggin, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Song Liu, Jiri Olsa,
	Viktor Malik, live-patching, Josh Poimboeuf, Joe Lawrence,
	Jiri Kosina, linux-trace-kernel, Steven Rostedt, Masami Hiramatsu,
	Mark Rutland, Shung-Hsi Yu

Hi Hari,

kernel test robot noticed the following build errors:

[auto build test ERROR on powerpc/next]
[also build test ERROR on powerpc/fixes trace/for-next bpf-next/net bpf-next/master bpf/master linus/master v6.17 next-20251002]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Hari-Bathini/powerpc64-bpf-support-direct_call-on-livepatch-function/20251003-033243
base:   https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
patch link:    https://lore.kernel.org/r/20251002192755.86441-1-hbathini%40linux.ibm.com
patch subject: [PATCH] powerpc64/bpf: support direct_call on livepatch function
config: powerpc64-randconfig-001-20251003 (https://download.01.org/0day-ci/archive/20251003/202510031817.t50YvoeN-lkp@intel.com/config)
compiler: powerpc64-linux-gcc (GCC) 12.5.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20251003/202510031817.t50YvoeN-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202510031817.t50YvoeN-lkp@intel.com/

All errors (new ones prefixed by >>):

   In file included from arch/powerpc/net/bpf_jit_comp.c:24:
   arch/powerpc/net/bpf_jit_comp.c: In function '__arch_prepare_bpf_trampoline':
>> include/linux/stddef.h:16:33: error: 'struct thread_info' has no member named 'livepatch_sp'
      16 | #define offsetof(TYPE, MEMBER)  __builtin_offsetof(TYPE, MEMBER)
         |                                 ^~~~~~~~~~~~~~~~~~
   arch/powerpc/net/bpf_jit.h:29:34: note: in definition of macro 'PLANT_INSTR'
      29 |         do { if (d) { (d)[idx] = instr; } idx++; } while (0)
         |                                  ^~~~~
   arch/powerpc/net/bpf_jit_comp.c:857:17: note: in expansion of macro 'EMIT'
     857 |                 EMIT(PPC_RAW_ADDI(_R12, _R12, offsetof(struct thread_info, livepatch_sp)));
         |                 ^~~~
   arch/powerpc/include/asm/ppc-opcode.h:501:85: note: in expansion of macro 'IMM_L'
     501 | #define PPC_RAW_ADDI(d, a, i)           (0x38000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
         |                                                                                     ^~~~~
   arch/powerpc/net/bpf_jit_comp.c:857:22: note: in expansion of macro 'PPC_RAW_ADDI'
     857 |                 EMIT(PPC_RAW_ADDI(_R12, _R12, offsetof(struct thread_info, livepatch_sp)));
         |                      ^~~~~~~~~~~~
   arch/powerpc/net/bpf_jit_comp.c:857:47: note: in expansion of macro 'offsetof'
     857 |                 EMIT(PPC_RAW_ADDI(_R12, _R12, offsetof(struct thread_info, livepatch_sp)));
         |                                               ^~~~~~~~
>> arch/powerpc/net/bpf_jit_comp.c:860:48: error: 'LIVEPATCH_STACK_FRAME_SIZE' undeclared (first use in this function)
     860 |                 EMIT(PPC_RAW_ADDI(_R12, _R12, -LIVEPATCH_STACK_FRAME_SIZE));
         |                                                ^~~~~~~~~~~~~~~~~~~~~~~~~~
   arch/powerpc/net/bpf_jit.h:29:34: note: in definition of macro 'PLANT_INSTR'
      29 |         do { if (d) { (d)[idx] = instr; } idx++; } while (0)
         |                                  ^~~~~
   arch/powerpc/net/bpf_jit_comp.c:860:17: note: in expansion of macro 'EMIT'
     860 |                 EMIT(PPC_RAW_ADDI(_R12, _R12, -LIVEPATCH_STACK_FRAME_SIZE));
         |                 ^~~~
   arch/powerpc/include/asm/ppc-opcode.h:501:85: note: in expansion of macro 'IMM_L'
     501 | #define PPC_RAW_ADDI(d, a, i)           (0x38000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
         |                                                                                     ^~~~~
   arch/powerpc/net/bpf_jit_comp.c:860:22: note: in expansion of macro 'PPC_RAW_ADDI'
     860 |                 EMIT(PPC_RAW_ADDI(_R12, _R12, -LIVEPATCH_STACK_FRAME_SIZE));
         |                      ^~~~~~~~~~~~
   arch/powerpc/net/bpf_jit_comp.c:860:48: note: each undeclared identifier is reported only once for each function it appears in
     860 |                 EMIT(PPC_RAW_ADDI(_R12, _R12, -LIVEPATCH_STACK_FRAME_SIZE));
         |                                                ^~~~~~~~~~~~~~~~~~~~~~~~~~
   arch/powerpc/net/bpf_jit.h:29:34: note: in definition of macro 'PLANT_INSTR'
      29 |         do { if (d) { (d)[idx] = instr; } idx++; } while (0)
         |                                  ^~~~~
   arch/powerpc/net/bpf_jit_comp.c:860:17: note: in expansion of macro 'EMIT'
     860 |                 EMIT(PPC_RAW_ADDI(_R12, _R12, -LIVEPATCH_STACK_FRAME_SIZE));
         |                 ^~~~
   arch/powerpc/include/asm/ppc-opcode.h:501:85: note: in expansion of macro 'IMM_L'
     501 | #define PPC_RAW_ADDI(d, a, i)           (0x38000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
         |                                                                                     ^~~~~
   arch/powerpc/net/bpf_jit_comp.c:860:22: note: in expansion of macro 'PPC_RAW_ADDI'
     860 |                 EMIT(PPC_RAW_ADDI(_R12, _R12, -LIVEPATCH_STACK_FRAME_SIZE));
         |                      ^~~~~~~~~~~~


vim +16 include/linux/stddef.h

6e218287432472 Richard Knutsson 2006-09-30  14  
^1da177e4c3f41 Linus Torvalds   2005-04-16  15  #undef offsetof
14e83077d55ff4 Rasmus Villemoes 2022-03-23 @16  #define offsetof(TYPE, MEMBER)	__builtin_offsetof(TYPE, MEMBER)
3876488444e712 Denys Vlasenko   2015-03-09  17  

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] powerpc64/bpf: support direct_call on livepatch function
  2025-10-02 19:27 [PATCH] powerpc64/bpf: support direct_call on livepatch function Hari Bathini
  2025-10-03 11:21 ` kernel test robot
@ 2025-10-06  7:52 ` Naveen N Rao
  2025-10-06 13:20   ` Hari Bathini
  1 sibling, 1 reply; 11+ messages in thread
From: Naveen N Rao @ 2025-10-06  7:52 UTC (permalink / raw)
  To: Hari Bathini
  Cc: Madhavan Srinivasan, linuxppc-dev, Christophe Leroy,
	Michael Ellerman, Nicholas Piggin, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Song Liu, Jiri Olsa,
	Viktor Malik, live-patching, Josh Poimboeuf, Joe Lawrence,
	Jiri Kosina, linux-trace-kernel, Steven Rostedt, Masami Hiramatsu,
	Mark Rutland, Shung-Hsi Yu

On Fri, Oct 03, 2025 at 12:57:54AM +0530, Hari Bathini wrote:
> Today, livepatch takes precedence over direct_call. Instead, save the
> state and make direct_call before handling livepatch.

If we call into the BPF trampoline first and if we have 
BPF_TRAMP_F_CALL_ORIG set, does this result in the BPF trampoline 
calling the new copy of the live-patched function or the old one?

- Naveen


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] powerpc64/bpf: support direct_call on livepatch function
  2025-10-06  7:52 ` Naveen N Rao
@ 2025-10-06 13:20   ` Hari Bathini
  2025-10-08  8:13     ` Naveen N Rao
  0 siblings, 1 reply; 11+ messages in thread
From: Hari Bathini @ 2025-10-06 13:20 UTC (permalink / raw)
  To: Naveen N Rao
  Cc: Madhavan Srinivasan, linuxppc-dev, Christophe Leroy,
	Michael Ellerman, Nicholas Piggin, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Song Liu, Jiri Olsa,
	Viktor Malik, live-patching, Josh Poimboeuf, Joe Lawrence,
	Jiri Kosina, linux-trace-kernel, Steven Rostedt, Masami Hiramatsu,
	Mark Rutland, Shung-Hsi Yu



On 06/10/25 1:22 pm, Naveen N Rao wrote:
> On Fri, Oct 03, 2025 at 12:57:54AM +0530, Hari Bathini wrote:
>> Today, livepatch takes precedence over direct_call. Instead, save the
>> state and make direct_call before handling livepatch.
> 
> If we call into the BPF trampoline first and if we have
> BPF_TRAMP_F_CALL_ORIG set, does this result in the BPF trampoline
> calling the new copy of the live-patched function or the old one?

Naveen, calls the new copy of the live-patched function..

- Hari

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] powerpc64/bpf: support direct_call on livepatch function
  2025-10-06 13:20   ` Hari Bathini
@ 2025-10-08  8:13     ` Naveen N Rao
  2025-10-09  5:49       ` Hari Bathini
  0 siblings, 1 reply; 11+ messages in thread
From: Naveen N Rao @ 2025-10-08  8:13 UTC (permalink / raw)
  To: Hari Bathini
  Cc: Madhavan Srinivasan, linuxppc-dev, Christophe Leroy,
	Michael Ellerman, Nicholas Piggin, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Song Liu, Jiri Olsa,
	Viktor Malik, live-patching, Josh Poimboeuf, Joe Lawrence,
	Jiri Kosina, linux-trace-kernel, Steven Rostedt, Masami Hiramatsu,
	Mark Rutland, Shung-Hsi Yu

On Mon, Oct 06, 2025 at 06:50:20PM +0530, Hari Bathini wrote:
> 
> 
> On 06/10/25 1:22 pm, Naveen N Rao wrote:
> > On Fri, Oct 03, 2025 at 12:57:54AM +0530, Hari Bathini wrote:
> > > Today, livepatch takes precedence over direct_call. Instead, save the
> > > state and make direct_call before handling livepatch.
> > 
> > If we call into the BPF trampoline first and if we have
> > BPF_TRAMP_F_CALL_ORIG set, does this result in the BPF trampoline
> > calling the new copy of the live-patched function or the old one?
> 
> Naveen, calls the new copy of the live-patched function..

Hmm... I'm probably missing something.

With ftrace OOL stubs, what I recall is that BPF trampoline derives the 
original function address from the OOL stub (which would be associated 
with the original function, not the livepatch one). This should mean 
that the trampoline continues to invoke the original function.


- Naveen


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] powerpc64/bpf: support direct_call on livepatch function
  2025-10-08  8:13     ` Naveen N Rao
@ 2025-10-09  5:49       ` Hari Bathini
  2025-10-09 11:27         ` Naveen N Rao
  0 siblings, 1 reply; 11+ messages in thread
From: Hari Bathini @ 2025-10-09  5:49 UTC (permalink / raw)
  To: Naveen N Rao
  Cc: Madhavan Srinivasan, linuxppc-dev, Christophe Leroy,
	Michael Ellerman, Nicholas Piggin, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Song Liu, Jiri Olsa,
	Viktor Malik, live-patching, Josh Poimboeuf, Joe Lawrence,
	Jiri Kosina, linux-trace-kernel, Steven Rostedt, Masami Hiramatsu,
	Mark Rutland, Shung-Hsi Yu



On 08/10/25 1:43 pm, Naveen N Rao wrote:
> On Mon, Oct 06, 2025 at 06:50:20PM +0530, Hari Bathini wrote:
>>
>>
>> On 06/10/25 1:22 pm, Naveen N Rao wrote:
>>> On Fri, Oct 03, 2025 at 12:57:54AM +0530, Hari Bathini wrote:
>>>> Today, livepatch takes precedence over direct_call. Instead, save the
>>>> state and make direct_call before handling livepatch.
>>>
>>> If we call into the BPF trampoline first and if we have
>>> BPF_TRAMP_F_CALL_ORIG set, does this result in the BPF trampoline
>>> calling the new copy of the live-patched function or the old one?
>>
>> Naveen, calls the new copy of the live-patched function..
> 
> Hmm... I'm probably missing something.
> 
> With ftrace OOL stubs, what I recall is that BPF trampoline derives the
> original function address from the OOL stub (which would be associated
> with the original function, not the livepatch one).

Trampoline derives the address from LR. The below snippet
in livepatch_handler ensures the trampoline jumps to '1f'
label instead of the original function with LR updated:

+	/* Jump to the direct_call */
+	bnectrl	cr1
+
+	/*
+	 * The address to jump after direct call is deduced based on ftrace 
OOL stub sequence.
+	 * The seemingly insignificant couple of instructions below is to 
mimic that here to
+	 * jump back to the livepatch handler code below.
+	 */
+	nop
+	b	1f
+
+	/*
+	 * Restore the state for livepatching from the livepatch stack.
+	 * Before that, check if livepatch stack is intact. Use r0 for it.
+	 */
+1:	mtctr	r0


- Hari

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] powerpc64/bpf: support direct_call on livepatch function
  2025-10-09  5:49       ` Hari Bathini
@ 2025-10-09 11:27         ` Naveen N Rao
  2025-10-10  7:17           ` Hari Bathini
  0 siblings, 1 reply; 11+ messages in thread
From: Naveen N Rao @ 2025-10-09 11:27 UTC (permalink / raw)
  To: Hari Bathini
  Cc: Madhavan Srinivasan, linuxppc-dev, Christophe Leroy,
	Michael Ellerman, Nicholas Piggin, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Song Liu, Jiri Olsa,
	Viktor Malik, live-patching, Josh Poimboeuf, Joe Lawrence,
	Jiri Kosina, linux-trace-kernel, Steven Rostedt, Masami Hiramatsu,
	Mark Rutland, Shung-Hsi Yu

On Thu, Oct 09, 2025 at 11:19:45AM +0530, Hari Bathini wrote:
> 
> 
> On 08/10/25 1:43 pm, Naveen N Rao wrote:
> > On Mon, Oct 06, 2025 at 06:50:20PM +0530, Hari Bathini wrote:
> > > 
> > > 
> > > On 06/10/25 1:22 pm, Naveen N Rao wrote:
> > > > On Fri, Oct 03, 2025 at 12:57:54AM +0530, Hari Bathini wrote:
> > > > > Today, livepatch takes precedence over direct_call. Instead, save the
> > > > > state and make direct_call before handling livepatch.
> > > > 
> > > > If we call into the BPF trampoline first and if we have
> > > > BPF_TRAMP_F_CALL_ORIG set, does this result in the BPF trampoline
> > > > calling the new copy of the live-patched function or the old one?
> > > 
> > > Naveen, calls the new copy of the live-patched function..
> > 
> > Hmm... I'm probably missing something.
> > 
> > With ftrace OOL stubs, what I recall is that BPF trampoline derives the
> > original function address from the OOL stub (which would be associated
> > with the original function, not the livepatch one).
> 
> Trampoline derives the address from LR.

Does it? I'm referring to BPF_TRAMP_F_CALL_ORIG handling in 
__arch_prepare_bpf_trampoline(). LR at BPF trampoline entry points at 
the ftrace OOL stub. We recover the "real LR" pointing to the function 
being traced from there so that we can call into it from within the BPF 
trampoline.

- Naveen


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] powerpc64/bpf: support direct_call on livepatch function
  2025-10-09 11:27         ` Naveen N Rao
@ 2025-10-10  7:17           ` Hari Bathini
  2025-10-15  6:18             ` Naveen N Rao
  0 siblings, 1 reply; 11+ messages in thread
From: Hari Bathini @ 2025-10-10  7:17 UTC (permalink / raw)
  To: Naveen N Rao
  Cc: Madhavan Srinivasan, linuxppc-dev, Christophe Leroy,
	Michael Ellerman, Nicholas Piggin, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Song Liu, Jiri Olsa,
	Viktor Malik, live-patching, Josh Poimboeuf, Joe Lawrence,
	Jiri Kosina, linux-trace-kernel, Steven Rostedt, Masami Hiramatsu,
	Mark Rutland, Shung-Hsi Yu



On 09/10/25 4:57 pm, Naveen N Rao wrote:
> On Thu, Oct 09, 2025 at 11:19:45AM +0530, Hari Bathini wrote:
>>
>>
>> On 08/10/25 1:43 pm, Naveen N Rao wrote:
>>> On Mon, Oct 06, 2025 at 06:50:20PM +0530, Hari Bathini wrote:
>>>>
>>>>
>>>> On 06/10/25 1:22 pm, Naveen N Rao wrote:
>>>>> On Fri, Oct 03, 2025 at 12:57:54AM +0530, Hari Bathini wrote:
>>>>>> Today, livepatch takes precedence over direct_call. Instead, save the
>>>>>> state and make direct_call before handling livepatch.
>>>>>
>>>>> If we call into the BPF trampoline first and if we have
>>>>> BPF_TRAMP_F_CALL_ORIG set, does this result in the BPF trampoline
>>>>> calling the new copy of the live-patched function or the old one?
>>>>
>>>> Naveen, calls the new copy of the live-patched function..
>>>
>>> Hmm... I'm probably missing something.
>>>
>>> With ftrace OOL stubs, what I recall is that BPF trampoline derives the
>>> original function address from the OOL stub (which would be associated
>>> with the original function, not the livepatch one).
>>
>> Trampoline derives the address from LR.
> 
> Does it? I'm referring to BPF_TRAMP_F_CALL_ORIG handling in
> __arch_prepare_bpf_trampoline().


> LR at BPF trampoline entry points at
> the ftrace OOL stub. We recover the "real LR" pointing to the function
> being traced from there so that we can call into it from within the BPF
> trampoline.

Naveen, from the snippet in livepatch_handler code shared below,
the LR at BPF trmapoline entry points at the 'nop' after the call
to trampoline with 'bnectrl cr1' in the updated livepatch_handler.

Mimicking ftrace OOL branch instruction in livepatch_handler
with 'b	1f' (the instruction after nop) to ensure the trampoline
derives the real LR to '1f' and jumps back into the livepatch_handler..

+       /* Jump to the direct_call */
+       bnectrl cr1
+
+       /*
+        * The address to jump after direct call is deduced based on 
ftrace OOL stub sequence.
+        * The seemingly insignificant couple of instructions below is 
to mimic that here to
+        * jump back to the livepatch handler code below.
+        */
+       nop
+       b       1f
+
+       /*
+        * Restore the state for livepatching from the livepatch stack.
+        * Before that, check if livepatch stack is intact. Use r0 for it.
+        */
+1:     mtctr   r0


I should probably improve my comments for better readability..

- Hari

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] powerpc64/bpf: support direct_call on livepatch function
  2025-10-10  7:17           ` Hari Bathini
@ 2025-10-15  6:18             ` Naveen N Rao
  2025-12-08 16:35               ` Hari Bathini
  0 siblings, 1 reply; 11+ messages in thread
From: Naveen N Rao @ 2025-10-15  6:18 UTC (permalink / raw)
  To: Hari Bathini
  Cc: Madhavan Srinivasan, linuxppc-dev, Christophe Leroy,
	Michael Ellerman, Nicholas Piggin, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Song Liu, Jiri Olsa,
	Viktor Malik, live-patching, Josh Poimboeuf, Joe Lawrence,
	Jiri Kosina, linux-trace-kernel, Steven Rostedt, Masami Hiramatsu,
	Mark Rutland, Shung-Hsi Yu

On Fri, Oct 10, 2025 at 12:47:21PM +0530, Hari Bathini wrote:
> 
> 
> On 09/10/25 4:57 pm, Naveen N Rao wrote:
> > On Thu, Oct 09, 2025 at 11:19:45AM +0530, Hari Bathini wrote:
> > > 
> > > 
> > > On 08/10/25 1:43 pm, Naveen N Rao wrote:
> > > > On Mon, Oct 06, 2025 at 06:50:20PM +0530, Hari Bathini wrote:
> > > > > 
> > > > > 
> > > > > On 06/10/25 1:22 pm, Naveen N Rao wrote:
> > > > > > On Fri, Oct 03, 2025 at 12:57:54AM +0530, Hari Bathini wrote:
> > > > > > > Today, livepatch takes precedence over direct_call. Instead, save the
> > > > > > > state and make direct_call before handling livepatch.
> > > > > > 
> > > > > > If we call into the BPF trampoline first and if we have
> > > > > > BPF_TRAMP_F_CALL_ORIG set, does this result in the BPF trampoline
> > > > > > calling the new copy of the live-patched function or the old one?
> > > > > 
> > > > > Naveen, calls the new copy of the live-patched function..
> > > > 
> > > > Hmm... I'm probably missing something.
> > > > 
> > > > With ftrace OOL stubs, what I recall is that BPF trampoline derives the
> > > > original function address from the OOL stub (which would be associated
> > > > with the original function, not the livepatch one).
> > > 
> > > Trampoline derives the address from LR.
> > 
> > Does it? I'm referring to BPF_TRAMP_F_CALL_ORIG handling in
> > __arch_prepare_bpf_trampoline().
> 
> 
> > LR at BPF trampoline entry points at
> > the ftrace OOL stub. We recover the "real LR" pointing to the function
> > being traced from there so that we can call into it from within the BPF
> > trampoline.
> 
> Naveen, from the snippet in livepatch_handler code shared below,
> the LR at BPF trampoline entry points at the 'nop' after the call
> to trampoline with 'bnectrl cr1' in the updated livepatch_handler.
> 
> Mimicking the ftrace OOL branch instruction in livepatch_handler
> with 'b	1f' (the instruction after nop) ensures the trampoline
> derives the real LR to '1f' and jumps back into the livepatch_handler..
> 
> +       /* Jump to the direct_call */
> +       bnectrl cr1
> +
> +       /*
> +        * The address to jump after direct call is deduced based on ftrace
> OOL stub sequence.
> +        * The seemingly insignificant couple of instructions below are to
> mimic that here to
> +        * jump back to the livepatch handler code below.
> +        */
> +       nop
> +       b       1f
> +
> +       /*
> +        * Restore the state for livepatching from the livepatch stack.
> +        * Before that, check if livepatch stack is intact. Use r0 for it.
> +        */
> +1:     mtctr   r0

Ah, so you are faking an ftrace OOL stub here. But, won't this mean that 
bpf_get_func_ip() won't return the function address anymore?

One of the other thoughts I had was if we could stuff the function 
address into the ftrace OOL stub. I had considered this back when I 
implemented the OOL stubs, but didn't do it due to the extra memory 
requirement. However, given the dance we're having to do, I'm now 
thinking that may make sense and can simplify the code. If we can also 
hook into livepatch, then we should be able to update the function 
address in the stub to point to the new address and the trampoline 
should then "just work" since it already saves/restores the TOC [We may 
additionally have to update the function IP in _R12, but that would be a 
minor change overall]

We will still need a way to restore livepatch TOC if the BPF trampoline 
doesn't itself call into the function, but we may be able to handle that 
if we change the return address to jump to a stub that restores the TOC 
from the livepatch stack.

> 
> 
> I should probably improve my comments for better readability..

Yes, please. I would also split the changes converting some of the hard 
coded offsets into macros into a separate patch.

- Naveen


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] powerpc64/bpf: support direct_call on livepatch function
  2025-10-15  6:18             ` Naveen N Rao
@ 2025-12-08 16:35               ` Hari Bathini
  2026-01-09 13:48                 ` Naveen N Rao
  0 siblings, 1 reply; 11+ messages in thread
From: Hari Bathini @ 2025-12-08 16:35 UTC (permalink / raw)
  To: Naveen N Rao
  Cc: Madhavan Srinivasan, linuxppc-dev, Christophe Leroy,
	Michael Ellerman, Nicholas Piggin, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Song Liu, Jiri Olsa,
	Viktor Malik, live-patching, Josh Poimboeuf, Joe Lawrence,
	Jiri Kosina, linux-trace-kernel, Steven Rostedt, Masami Hiramatsu,
	Mark Rutland, Shung-Hsi Yu

Thanks for the review, Naveen.
I was on leave for some time and could not look into it for a while
after that.

On 15/10/25 11:48 am, Naveen N Rao wrote:
> On Fri, Oct 10, 2025 at 12:47:21PM +0530, Hari Bathini wrote:
>>
>>
>> On 09/10/25 4:57 pm, Naveen N Rao wrote:
>>> On Thu, Oct 09, 2025 at 11:19:45AM +0530, Hari Bathini wrote:
>>>>
>>>>
>>>> On 08/10/25 1:43 pm, Naveen N Rao wrote:
>>>>> On Mon, Oct 06, 2025 at 06:50:20PM +0530, Hari Bathini wrote:
>>>>>>
>>>>>>
>>>>>> On 06/10/25 1:22 pm, Naveen N Rao wrote:
>>>>>>> On Fri, Oct 03, 2025 at 12:57:54AM +0530, Hari Bathini wrote:
>>>>>>>> Today, livepatch takes precedence over direct_call. Instead, save the
>>>>>>>> state and make direct_call before handling livepatch.
>>>>>>>
>>>>>>> If we call into the BPF trampoline first and if we have
>>>>>>> BPF_TRAMP_F_CALL_ORIG set, does this result in the BPF trampoline
>>>>>>> calling the new copy of the live-patched function or the old one?
>>>>>>
>>>>>> Naveen, calls the new copy of the live-patched function..
>>>>>
>>>>> Hmm... I'm probably missing something.
>>>>>
>>>>> With ftrace OOL stubs, what I recall is that BPF trampoline derives the
>>>>> original function address from the OOL stub (which would be associated
>>>>> with the original function, not the livepatch one).
>>>>
>>>> Trampoline derives the address from LR.
>>>
>>> Does it? I'm referring to BPF_TRAMP_F_CALL_ORIG handling in
>>> __arch_prepare_bpf_trampoline().
>>
>>
>>> LR at BPF trampoline entry points at
>>> the ftrace OOL stub. We recover the "real LR" pointing to the function
>>> being traced from there so that we can call into it from within the BPF
>>> trampoline.
>>
>> Naveen, from the snippet in livepatch_handler code shared below,
>> the LR at BPF trampoline entry points at the 'nop' after the call
>> to trampoline with 'bnectrl cr1' in the updated livepatch_handler.
>>
>> Mimicking the ftrace OOL branch instruction in livepatch_handler
>> with 'b	1f' (the instruction after nop) ensures the trampoline
>> derives the real LR to '1f' and jumps back into the livepatch_handler..
>>
>> +       /* Jump to the direct_call */
>> +       bnectrl cr1
>> +
>> +       /*
>> +        * The address to jump after direct call is deduced based on ftrace
>> OOL stub sequence.
>> +        * The seemingly insignificant couple of instructions below are to
>> mimic that here to
>> +        * jump back to the livepatch handler code below.
>> +        */
>> +       nop
>> +       b       1f
>> +
>> +       /*
>> +        * Restore the state for livepatching from the livepatch stack.
>> +        * Before that, check if livepatch stack is intact. Use r0 for it.
>> +        */
>> +1:     mtctr   r0
> 
> Ah, so you are faking a ftrace OOL stub here. But, won't this mean that

Yeah.

> bpf_get_func_ip() won't return the function address anymore?

Right. I do agree it can have issues in some scenarios.

> 
> One of the other thoughts I had was if we could stuff the function
> address into the ftrace OOL stub. I had considered this back when I
> implemented the OOL stubs, but didn't do it due to the extra memory
> requirement. However, given the dance we're having to do, I'm now
> thinking that may make sense and can simplify the code. If we can also
> hook into livepatch, then we should be able to update the function
> address in the stub to point to the new address and the trampoline
> should then "just work" since it already saves/restores the TOC [We may
> additionally have to update the function IP in _R12, but that would be a
> minor change overall]
> 
> We will still need a way to restore livepatch TOC if the BPF trampoline
> doesn't itself call into the function, but we may be able to handle that
> if we change the return address to jump to a stub that restores the TOC
> from the livepatch stack.

Sounds doable. Looking into a couple of other things at the moment
though. Will try out this suggestion and get back post that.
Having said that, your thoughts on whether the current approach
is a viable option if bpf_get_func_ip() can be fixed somehow?

- Hari

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] powerpc64/bpf: support direct_call on livepatch function
  2025-12-08 16:35               ` Hari Bathini
@ 2026-01-09 13:48                 ` Naveen N Rao
  0 siblings, 0 replies; 11+ messages in thread
From: Naveen N Rao @ 2026-01-09 13:48 UTC (permalink / raw)
  To: Hari Bathini
  Cc: Madhavan Srinivasan, linuxppc-dev, Christophe Leroy,
	Michael Ellerman, Nicholas Piggin, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Song Liu, Jiri Olsa,
	Viktor Malik, live-patching, Josh Poimboeuf, Joe Lawrence,
	Jiri Kosina, linux-trace-kernel, Steven Rostedt, Masami Hiramatsu,
	Mark Rutland, Shung-Hsi Yu

On Mon, Dec 08, 2025 at 10:05:44PM +0530, Hari Bathini wrote:
> > 
> > One of the other thoughts I had was if we could stuff the function
> > address into the ftrace OOL stub. I had considered this back when I
> > implemented the OOL stubs, but didn't do it due to the extra memory
> > requirement. However, given the dance we're having to do, I'm now
> > thinking that may make sense and can simplify the code. If we can also
> > hook into livepatch, then we should be able to update the function
> > address in the stub to point to the new address and the trampoline
> > should then "just work" since it already saves/restores the TOC [We may
> > additionally have to update the function IP in _R12, but that would be a
> > minor change overall]
> > 
> > We will still need a way to restore livepatch TOC if the BPF trampoline
> > doesn't itself call into the function, but we may be able to handle that
> > if we change the return address to jump to a stub that restores the TOC
> > from the livepatch stack.
> 
> Sounds doable. Looking into a couple of other things at the moment
> though. Will try out this suggestion and get back post that.
> Having said that, your thoughts on whether the current approach
> is a viable option if bpf_get_func_ip() can be fixed somehow?

Oh, that's fine -- feel free to go with whatever approach you think 
works best.


- Naveen


^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2026-01-09 13:51 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-10-02 19:27 [PATCH] powerpc64/bpf: support direct_call on livepatch function Hari Bathini
2025-10-03 11:21 ` kernel test robot
2025-10-06  7:52 ` Naveen N Rao
2025-10-06 13:20   ` Hari Bathini
2025-10-08  8:13     ` Naveen N Rao
2025-10-09  5:49       ` Hari Bathini
2025-10-09 11:27         ` Naveen N Rao
2025-10-10  7:17           ` Hari Bathini
2025-10-15  6:18             ` Naveen N Rao
2025-12-08 16:35               ` Hari Bathini
2026-01-09 13:48                 ` Naveen N Rao

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox