public inbox for stable@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 1/5] powerpc64/bpf: do not increment tailcall count when prog is NULL
       [not found] <20260220063933.196141-1-hbathini@linux.ibm.com>
@ 2026-02-20  6:39 ` Hari Bathini
  2026-02-21  3:40   ` Venkat Rao Bagalkote
  2026-02-20  6:39 ` [PATCH v2 2/5] powerpc64/bpf: fix the address returned by bpf_get_func_ip Hari Bathini
  1 sibling, 1 reply; 5+ messages in thread
From: Hari Bathini @ 2026-02-20  6:39 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: bpf, Madhavan Srinivasan, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Saket Kumar Bhaskar, Abhishek Dubey,
	Venkat Rao Bagalkote, stable

Do not increment tailcall count, if tailcall did not succeed due to
missing BPF program.

Fixes: ce0761419fae ("powerpc/bpf: Implement support for tail calls")
Cc: stable@vger.kernel.org
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
---

* No changes since v1.


 arch/powerpc/net/bpf_jit_comp64.c | 39 +++++++++++++++++--------------
 1 file changed, 21 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index b1a3945ccc9f..44ce8a8783f9 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -522,9 +522,30 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 
 	/*
 	 * tail_call_info++; <- Actual value of tcc here
+	 * Writeback this updated value only if tailcall succeeds.
 	 */
 	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
 
+	/* prog = array->ptrs[index]; */
+	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_2), b2p_index, 8));
+	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2), b2p_bpf_array));
+	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
+			offsetof(struct bpf_array, ptrs)));
+
+	/*
+	 * if (prog == NULL)
+	 *   goto out;
+	 */
+	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_2), 0));
+	PPC_BCC_SHORT(COND_EQ, out);
+
+	/* goto *(prog->bpf_func + prologue_size); */
+	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
+			offsetof(struct bpf_prog, bpf_func)));
+	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
+			  FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
+	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_2)));
+
 	/*
 	 * Before writing updated tail_call_info, distinguish if current frame
 	 * is storing a reference to tail_call_info or actual tcc value in
@@ -539,24 +560,6 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	/* Writeback updated value to tail_call_info */
 	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
 
-	/* prog = array->ptrs[index]; */
-	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
-	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
-	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
-
-	/*
-	 * if (prog == NULL)
-	 *   goto out;
-	 */
-	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
-	PPC_BCC_SHORT(COND_EQ, out);
-
-	/* goto *(prog->bpf_func + prologue_size); */
-	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
-	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
-			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
-	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
-
 	/* tear down stack, restore NVRs, ... */
 	bpf_jit_emit_common_epilogue(image, ctx);
 
-- 
2.53.0


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH v2 2/5] powerpc64/bpf: fix the address returned by bpf_get_func_ip
       [not found] <20260220063933.196141-1-hbathini@linux.ibm.com>
  2026-02-20  6:39 ` [PATCH v2 1/5] powerpc64/bpf: do not increment tailcall count when prog is NULL Hari Bathini
@ 2026-02-20  6:39 ` Hari Bathini
  2026-02-21  3:41   ` Venkat Rao Bagalkote
  2026-02-22 12:21   ` adubey
  1 sibling, 2 replies; 5+ messages in thread
From: Hari Bathini @ 2026-02-20  6:39 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: bpf, Madhavan Srinivasan, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Saket Kumar Bhaskar, Abhishek Dubey,
	Venkat Rao Bagalkote, stable

bpf_get_func_ip() helper function returns the address of the traced
function. It relies on the IP address stored at ctx - 16 by the bpf
trampoline. On 64-bit powerpc, this address is recovered from LR
accounting for OOL trampoline. But the address stored here was off
by 4-bytes. Ensure the address is the actual start of the traced
function.

Reported-by: Abhishek Dubey <adubey@linux.ibm.com>
Fixes: d243b62b7bd3 ("powerpc64/bpf: Add support for bpf trampolines")
Cc: stable@vger.kernel.org
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
---

* No changes since v1.


 arch/powerpc/net/bpf_jit_comp.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 987cd9fb0f37..fb6cc1f832a8 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -786,8 +786,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	 *                              [ reg argN          ]
 	 *                              [ ...               ]
 	 *       regs_off               [ reg_arg1          ] prog ctx context
-	 *       nregs_off              [ args count        ]
-	 *       ip_off                 [ traced function   ]
+	 *       nregs_off              [ args count        ] ((u64 *)prog_ctx)[-1]
+	 *       ip_off                 [ traced function   ] ((u64 *)prog_ctx)[-2]
 	 *                              [ ...               ]
 	 *       run_ctx_off            [ bpf_tramp_run_ctx ]
 	 *                              [ reg argN          ]
@@ -895,7 +895,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 
 	bpf_trampoline_save_args(image, ctx, func_frame_offset, nr_regs, regs_off);
 
-	/* Save our return address */
+	/* Save our LR/return address */
 	EMIT(PPC_RAW_MFLR(_R3));
 	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
 		EMIT(PPC_RAW_STL(_R3, _R1, alt_lr_off));
@@ -903,24 +903,29 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 		EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));
 
 	/*
-	 * Save ip address of the traced function.
-	 * We could recover this from LR, but we will need to address for OOL trampoline,
-	 * and optional GEP area.
+	 * Get IP address of the traced function.
+	 * In case of CONFIG_PPC_FTRACE_OUT_OF_LINE or BPF program, LR
+	 * points to the instruction after the 'bl' instruction in the OOL stub.
+	 * Refer to ftrace_init_ool_stub() and bpf_arch_text_poke() for OOL stub
+	 * of kernel functions and bpf programs respectively.
+	 * Recover kernel function/bpf program address from the unconditional
+	 * branch instruction at the end of OOL stub.
 	 */
 	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) || flags & BPF_TRAMP_F_IP_ARG) {
 		EMIT(PPC_RAW_LWZ(_R4, _R3, 4));
 		EMIT(PPC_RAW_SLWI(_R4, _R4, 6));
 		EMIT(PPC_RAW_SRAWI(_R4, _R4, 6));
 		EMIT(PPC_RAW_ADD(_R3, _R3, _R4));
-		EMIT(PPC_RAW_ADDI(_R3, _R3, 4));
 	}
 
 	if (flags & BPF_TRAMP_F_IP_ARG)
 		EMIT(PPC_RAW_STL(_R3, _R1, ip_off));
 
-	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
+	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
 		/* Fake our LR for unwind */
+		EMIT(PPC_RAW_ADDI(_R3, _R3, 4));
 		EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));
+	}
 
 	/* Save function arg count -- see bpf_get_func_arg_cnt() */
 	EMIT(PPC_RAW_LI(_R3, nr_regs));
-- 
2.53.0


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH v2 1/5] powerpc64/bpf: do not increment tailcall count when prog is NULL
  2026-02-20  6:39 ` [PATCH v2 1/5] powerpc64/bpf: do not increment tailcall count when prog is NULL Hari Bathini
@ 2026-02-21  3:40   ` Venkat Rao Bagalkote
  0 siblings, 0 replies; 5+ messages in thread
From: Venkat Rao Bagalkote @ 2026-02-21  3:40 UTC (permalink / raw)
  To: Hari Bathini, linuxppc-dev
  Cc: bpf, Madhavan Srinivasan, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Saket Kumar Bhaskar, Abhishek Dubey, stable


On 20/02/26 12:09 pm, Hari Bathini wrote:
> Do not increment tailcall count, if tailcall did not succeed due to
> missing BPF program.
>
> Fixes: ce0761419fae ("powerpc/bpf: Implement support for tail calls")
> Cc: stable@vger.kernel.org
> Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
> ---
>
> * No changes since v1.
>
>
>   arch/powerpc/net/bpf_jit_comp64.c | 39 +++++++++++++++++--------------
>   1 file changed, 21 insertions(+), 18 deletions(-)
>
> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
> index b1a3945ccc9f..44ce8a8783f9 100644
> --- a/arch/powerpc/net/bpf_jit_comp64.c
> +++ b/arch/powerpc/net/bpf_jit_comp64.c
> @@ -522,9 +522,30 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
>   
>   	/*
>   	 * tail_call_info++; <- Actual value of tcc here
> +	 * Writeback this updated value only if tailcall succeeds.
>   	 */
>   	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
>   
> +	/* prog = array->ptrs[index]; */
> +	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_2), b2p_index, 8));
> +	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2), b2p_bpf_array));
> +	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
> +			offsetof(struct bpf_array, ptrs)));
> +
> +	/*
> +	 * if (prog == NULL)
> +	 *   goto out;
> +	 */
> +	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_2), 0));
> +	PPC_BCC_SHORT(COND_EQ, out);
> +
> +	/* goto *(prog->bpf_func + prologue_size); */
> +	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
> +			offsetof(struct bpf_prog, bpf_func)));
> +	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
> +			  FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
> +	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_2)));
> +
>   	/*
>   	 * Before writing updated tail_call_info, distinguish if current frame
>   	 * is storing a reference to tail_call_info or actual tcc value in
> @@ -539,24 +560,6 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
>   	/* Writeback updated value to tail_call_info */
>   	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
>   
> -	/* prog = array->ptrs[index]; */
> -	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
> -	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
> -	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
> -
> -	/*
> -	 * if (prog == NULL)
> -	 *   goto out;
> -	 */
> -	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
> -	PPC_BCC_SHORT(COND_EQ, out);
> -
> -	/* goto *(prog->bpf_func + prologue_size); */
> -	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
> -	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
> -			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
> -	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
> -
>   	/* tear down stack, restore NVRs, ... */
>   	bpf_jit_emit_common_epilogue(image, ctx);
>   

Tested this patch. Please add the below tag.

Tested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>


Regards,

Venkat.



^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2 2/5] powerpc64/bpf: fix the address returned by bpf_get_func_ip
  2026-02-20  6:39 ` [PATCH v2 2/5] powerpc64/bpf: fix the address returned by bpf_get_func_ip Hari Bathini
@ 2026-02-21  3:41   ` Venkat Rao Bagalkote
  2026-02-22 12:21   ` adubey
  1 sibling, 0 replies; 5+ messages in thread
From: Venkat Rao Bagalkote @ 2026-02-21  3:41 UTC (permalink / raw)
  To: Hari Bathini, linuxppc-dev
  Cc: bpf, Madhavan Srinivasan, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Saket Kumar Bhaskar, Abhishek Dubey, stable


On 20/02/26 12:09 pm, Hari Bathini wrote:
> bpf_get_func_ip() helper function returns the address of the traced
> function. It relies on the IP address stored at ctx - 16 by the bpf
> trampoline. On 64-bit powerpc, this address is recovered from LR
> accounting for OOL trampoline. But the address stored here was off
> by 4-bytes. Ensure the address is the actual start of the traced
> function.
>
> Reported-by: Abhishek Dubey <adubey@linux.ibm.com>
> Fixes: d243b62b7bd3 ("powerpc64/bpf: Add support for bpf trampolines")
> Cc: stable@vger.kernel.org
> Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
> ---
>
> * No changes since v1.
>
>
>   arch/powerpc/net/bpf_jit_comp.c | 21 +++++++++++++--------
>   1 file changed, 13 insertions(+), 8 deletions(-)
>
> diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
> index 987cd9fb0f37..fb6cc1f832a8 100644
> --- a/arch/powerpc/net/bpf_jit_comp.c
> +++ b/arch/powerpc/net/bpf_jit_comp.c
> @@ -786,8 +786,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
>   	 *                              [ reg argN          ]
>   	 *                              [ ...               ]
>   	 *       regs_off               [ reg_arg1          ] prog ctx context
> -	 *       nregs_off              [ args count        ]
> -	 *       ip_off                 [ traced function   ]
> +	 *       nregs_off              [ args count        ] ((u64 *)prog_ctx)[-1]
> +	 *       ip_off                 [ traced function   ] ((u64 *)prog_ctx)[-2]
>   	 *                              [ ...               ]
>   	 *       run_ctx_off            [ bpf_tramp_run_ctx ]
>   	 *                              [ reg argN          ]
> @@ -895,7 +895,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
>   
>   	bpf_trampoline_save_args(image, ctx, func_frame_offset, nr_regs, regs_off);
>   
> -	/* Save our return address */
> +	/* Save our LR/return address */
>   	EMIT(PPC_RAW_MFLR(_R3));
>   	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
>   		EMIT(PPC_RAW_STL(_R3, _R1, alt_lr_off));
> @@ -903,24 +903,29 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
>   		EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));
>   
>   	/*
> -	 * Save ip address of the traced function.
> -	 * We could recover this from LR, but we will need to address for OOL trampoline,
> -	 * and optional GEP area.
> +	 * Get IP address of the traced function.
> +	 * In case of CONFIG_PPC_FTRACE_OUT_OF_LINE or BPF program, LR
> +	 * points to the instruction after the 'bl' instruction in the OOL stub.
> +	 * Refer to ftrace_init_ool_stub() and bpf_arch_text_poke() for OOL stub
> +	 * of kernel functions and bpf programs respectively.
> +	 * Recover kernel function/bpf program address from the unconditional
> +	 * branch instruction at the end of OOL stub.
>   	 */
>   	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) || flags & BPF_TRAMP_F_IP_ARG) {
>   		EMIT(PPC_RAW_LWZ(_R4, _R3, 4));
>   		EMIT(PPC_RAW_SLWI(_R4, _R4, 6));
>   		EMIT(PPC_RAW_SRAWI(_R4, _R4, 6));
>   		EMIT(PPC_RAW_ADD(_R3, _R3, _R4));
> -		EMIT(PPC_RAW_ADDI(_R3, _R3, 4));
>   	}
>   
>   	if (flags & BPF_TRAMP_F_IP_ARG)
>   		EMIT(PPC_RAW_STL(_R3, _R1, ip_off));
>   
> -	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
> +	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
>   		/* Fake our LR for unwind */
> +		EMIT(PPC_RAW_ADDI(_R3, _R3, 4));
>   		EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));
> +	}
>   
>   	/* Save function arg count -- see bpf_get_func_arg_cnt() */
>   	EMIT(PPC_RAW_LI(_R3, nr_regs));


./test_progs -t get_func_ip_test
#139     get_func_ip_test:OK
Summary: 1/0 PASSED, 0 SKIPPED, 0 FAILED


Tested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>


Regards,

Venkat.



^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2 2/5] powerpc64/bpf: fix the address returned by bpf_get_func_ip
  2026-02-20  6:39 ` [PATCH v2 2/5] powerpc64/bpf: fix the address returned by bpf_get_func_ip Hari Bathini
  2026-02-21  3:41   ` Venkat Rao Bagalkote
@ 2026-02-22 12:21   ` adubey
  1 sibling, 0 replies; 5+ messages in thread
From: adubey @ 2026-02-22 12:21 UTC (permalink / raw)
  To: Hari Bathini
  Cc: linuxppc-dev, bpf, Madhavan Srinivasan, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Saket Kumar Bhaskar,
	Venkat Rao Bagalkote, stable

On 2026-02-20 12:09, Hari Bathini wrote:
> bpf_get_func_ip() helper function returns the address of the traced
> function. It relies on the IP address stored at ctx - 16 by the bpf
> trampoline. On 64-bit powerpc, this address is recovered from LR
> accounting for OOL trampoline. But the address stored here was off
> by 4-bytes. Ensure the address is the actual start of the traced
> function.
> 
> Reported-by: Abhishek Dubey <adubey@linux.ibm.com>
> Fixes: d243b62b7bd3 ("powerpc64/bpf: Add support for bpf trampolines")
> Cc: stable@vger.kernel.org
> Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
> ---
> 
> * No changes since v1.
> 
> 
>  arch/powerpc/net/bpf_jit_comp.c | 21 +++++++++++++--------
>  1 file changed, 13 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/powerpc/net/bpf_jit_comp.c 
> b/arch/powerpc/net/bpf_jit_comp.c
> index 987cd9fb0f37..fb6cc1f832a8 100644
> --- a/arch/powerpc/net/bpf_jit_comp.c
> +++ b/arch/powerpc/net/bpf_jit_comp.c
> @@ -786,8 +786,8 @@ static int __arch_prepare_bpf_trampoline(struct
> bpf_tramp_image *im, void *rw_im
>  	 *                              [ reg argN          ]
>  	 *                              [ ...               ]
>  	 *       regs_off               [ reg_arg1          ] prog ctx 
> context
prog ctx context/prog_ctx context/prog_ctx, to be in sync with tags 
below.
please refer s390's field tagging
> -	 *       nregs_off              [ args count        ]
> -	 *       ip_off                 [ traced function   ]
> +	 *       nregs_off              [ args count        ] ((u64 
> *)prog_ctx)[-1]
> +	 *       ip_off                 [ traced function   ] ((u64 
> *)prog_ctx)[-2]
>  	 *                              [ ...               ]
>  	 *       run_ctx_off            [ bpf_tramp_run_ctx ]
>  	 *                              [ reg argN          ]
> @@ -895,7 +895,7 @@ static int __arch_prepare_bpf_trampoline(struct
> bpf_tramp_image *im, void *rw_im
> 
>  	bpf_trampoline_save_args(image, ctx, func_frame_offset, nr_regs, 
> regs_off);
> 
> -	/* Save our return address */
> +	/* Save our LR/return address */
>  	EMIT(PPC_RAW_MFLR(_R3));
>  	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
>  		EMIT(PPC_RAW_STL(_R3, _R1, alt_lr_off));
> @@ -903,24 +903,29 @@ static int __arch_prepare_bpf_trampoline(struct
> bpf_tramp_image *im, void *rw_im
>  		EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));
> 
>  	/*
> -	 * Save ip address of the traced function.
> -	 * We could recover this from LR, but we will need to address for
> OOL trampoline,
> -	 * and optional GEP area.
> +	 * Get IP address of the traced function.
Get/Derive
> +	 * In case of CONFIG_PPC_FTRACE_OUT_OF_LINE or BPF program, LR
> +	 * points to the instruction after the 'bl' instruction in the OOL 
> stub.
> +	 * Refer to ftrace_init_ool_stub() and bpf_arch_text_poke() for OOL 
> stub
> +	 * of kernel functions and bpf programs respectively.
> +	 * Recover kernel function/bpf program address from the unconditional
> +	 * branch instruction at the end of OOL stub.
>  	 */
>  	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) || flags & 
> BPF_TRAMP_F_IP_ARG) {
>  		EMIT(PPC_RAW_LWZ(_R4, _R3, 4));
Please add comment what R4 points to; for easy referencing
>  		EMIT(PPC_RAW_SLWI(_R4, _R4, 6));
>  		EMIT(PPC_RAW_SRAWI(_R4, _R4, 6));
>  		EMIT(PPC_RAW_ADD(_R3, _R3, _R4));
> -		EMIT(PPC_RAW_ADDI(_R3, _R3, 4));
>  	}
> 
>  	if (flags & BPF_TRAMP_F_IP_ARG)
>  		EMIT(PPC_RAW_STL(_R3, _R1, ip_off));
> 
> -	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
> +	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
>  		/* Fake our LR for unwind */
> +		EMIT(PPC_RAW_ADDI(_R3, _R3, 4));
>  		EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));
> +	}
> 
>  	/* Save function arg count -- see bpf_get_func_arg_cnt() */
>  	EMIT(PPC_RAW_LI(_R3, nr_regs));
-Abhishek

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2026-02-22 12:22 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
     [not found] <20260220063933.196141-1-hbathini@linux.ibm.com>
2026-02-20  6:39 ` [PATCH v2 1/5] powerpc64/bpf: do not increment tailcall count when prog is NULL Hari Bathini
2026-02-21  3:40   ` Venkat Rao Bagalkote
2026-02-20  6:39 ` [PATCH v2 2/5] powerpc64/bpf: fix the address returned by bpf_get_func_ip Hari Bathini
2026-02-21  3:41   ` Venkat Rao Bagalkote
2026-02-22 12:21   ` adubey

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox