linuxppc-dev.lists.ozlabs.org archive mirror
* [PATCH 1/1] bpf: take advantage of stack_depth tracking in powerpc JIT
@ 2017-09-01 18:53 Sandipan Das
From: Sandipan Das @ 2017-09-01 18:53 UTC (permalink / raw)
  To: mpe; +Cc: naveen.n.rao, daniel, ast, netdev, linuxppc-dev

Take advantage of stack_depth tracking, originally introduced for
x64, in the powerpc JIT as well. Round up the allocated stack to a
multiple of 16 bytes so that it stays quadword aligned for functions
called from the JITed bpf program.
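
As a minimal sketch of the rounding step (illustrative only, not part
of the patch: the round_up() macro below just mirrors the kernel
helper for power-of-two alignment, and the sample depths are made up):

  #include <stdio.h>

  /* Round x up to the next multiple of y; y must be a power of two. */
  #define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

  int main(void)
  {
          unsigned int depths[] = { 0, 1, 8, 40, 512 };
          unsigned int i;

          /* stack_depth comes from the verifier; stack_size is what
           * the JIT would reserve for the eBPF stack area. */
          for (i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
                  printf("stack_depth %3u -> stack_size %3u\n",
                         depths[i], round_up(depths[i], 16));
          return 0;
  }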

Signed-off-by: Sandipan Das <sandipan@linux.vnet.ibm.com>
---
 arch/powerpc/net/bpf_jit64.h      |  7 ++++---
 arch/powerpc/net/bpf_jit_comp64.c | 16 ++++++++++------
 2 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
index 62fa7589db2b..8bdef7ed28a8 100644
--- a/arch/powerpc/net/bpf_jit64.h
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -23,7 +23,7 @@
  *		[   nv gpr save area	] 8*8		|
  *		[    tail_call_cnt	] 8		|
  *		[    local_tmp_var	] 8		|
- * fp (r31) -->	[   ebpf stack space	] 512		|
+ * fp (r31) -->	[   ebpf stack space	] upto 512	|
  *		[     frame header	] 32/112	|
  * sp (r1) --->	[    stack pointer	] --------------
  */
@@ -32,8 +32,8 @@
 #define BPF_PPC_STACK_SAVE	(8*8)
 /* for bpf JIT code internal usage */
 #define BPF_PPC_STACK_LOCALS	16
-/* Ensure this is quadword aligned */
-#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + MAX_BPF_STACK + \
+/* stack frame excluding BPF stack, ensure this is quadword aligned */
+#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
 				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
 
 #ifndef __ASSEMBLY__
@@ -103,6 +103,7 @@ struct codegen_context {
 	 */
 	unsigned int seen;
 	unsigned int idx;
+	unsigned int stack_size;
 };
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 6ba5d253e857..a01362c88f6a 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -69,7 +69,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 static int bpf_jit_stack_local(struct codegen_context *ctx)
 {
 	if (bpf_has_stack_frame(ctx))
-		return STACK_FRAME_MIN_SIZE + MAX_BPF_STACK;
+		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
 	else
 		return -(BPF_PPC_STACK_SAVE + 16);
 }
@@ -82,8 +82,9 @@ static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
 {
 	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
-		return (bpf_has_stack_frame(ctx) ? BPF_PPC_STACKFRAME : 0)
-							- (8 * (32 - reg));
+		return (bpf_has_stack_frame(ctx) ?
+			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
+				- (8 * (32 - reg));
 
 	pr_err("BPF JIT is asking about unknown registers");
 	BUG();
@@ -134,7 +135,7 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
 		}
 
-		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
+		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
 	}
 
 	/*
@@ -161,7 +162,7 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 	/* Setup frame pointer to point to the bpf stack area */
 	if (bpf_is_seen_register(ctx, BPF_REG_FP))
 		PPC_ADDI(b2p[BPF_REG_FP], 1,
-				STACK_FRAME_MIN_SIZE + MAX_BPF_STACK);
+				STACK_FRAME_MIN_SIZE + ctx->stack_size);
 }
 
 static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
@@ -183,7 +184,7 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
 
 	/* Tear down our stack frame */
 	if (bpf_has_stack_frame(ctx)) {
-		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
+		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
 		if (ctx->seen & SEEN_FUNC) {
 			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
 			PPC_MTLR(0);
@@ -993,6 +994,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 
 	memset(&cgctx, 0, sizeof(struct codegen_context));
 
+	/* Make sure that the stack is quadword aligned. */
+	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
+
 	/* Scouting faux-generate pass 0 */
 	if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
 		/* We hit something illegal or unsupported. */
-- 
2.13.5
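
As a rough illustration of what the patch above buys, assuming the
ELFv2 ABI's 32-byte STACK_FRAME_MIN_SIZE (the layout comment's "32/112"
frame header) and a made-up program whose verifier-reported
stack_depth is 40 bytes, the JITed frame shrinks roughly as follows:

  /* Before: MAX_BPF_STACK (512) was always reserved. */
  #define OLD_FRAME        (32 + 512 + 16 + 8 * 8)        /* 624 bytes */

  /* After: only the quadword-rounded stack_depth sits on top of the
   * fixed part (BPF_PPC_STACKFRAME = 32 + 16 + 8 * 8 = 112 here). */
  #define NEW_FRAME(depth) (112 + (((depth) + 15) & ~15))

  /* NEW_FRAME(40) == 112 + 48 == 160 bytes, down from 624. */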


* Re: [PATCH 1/1] bpf: take advantage of stack_depth tracking in powerpc JIT
From: Naveen N. Rao @ 2017-09-01 19:03 UTC (permalink / raw)
  To: Sandipan Das; +Cc: mpe, daniel, ast, netdev, linuxppc-dev

On 2017/09/02 12:23AM, Sandipan Das wrote:
> Take advantage of stack_depth tracking, originally introduced for
> x64, in the powerpc JIT as well. Round up the allocated stack to a
> multiple of 16 bytes so that it stays quadword aligned for functions
> called from the JITed bpf program.
> 
> Signed-off-by: Sandipan Das <sandipan@linux.vnet.ibm.com>
> ---

LGTM, thanks!
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>

Michael,
Seeing as this is powerpc specific, can you please take this through 
your tree?


Thanks,
Naveen

>  arch/powerpc/net/bpf_jit64.h      |  7 ++++---
>  arch/powerpc/net/bpf_jit_comp64.c | 16 ++++++++++------
>  2 files changed, 14 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
> index 62fa7589db2b..8bdef7ed28a8 100644
> --- a/arch/powerpc/net/bpf_jit64.h
> +++ b/arch/powerpc/net/bpf_jit64.h
> @@ -23,7 +23,7 @@
>   *		[   nv gpr save area	] 8*8		|
>   *		[    tail_call_cnt	] 8		|
>   *		[    local_tmp_var	] 8		|
> - * fp (r31) -->	[   ebpf stack space	] 512		|
> + * fp (r31) -->	[   ebpf stack space	] upto 512	|
>   *		[     frame header	] 32/112	|
>   * sp (r1) --->	[    stack pointer	] --------------
>   */
> @@ -32,8 +32,8 @@
>  #define BPF_PPC_STACK_SAVE	(8*8)
>  /* for bpf JIT code internal usage */
>  #define BPF_PPC_STACK_LOCALS	16
> -/* Ensure this is quadword aligned */
> -#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + MAX_BPF_STACK + \
> +/* stack frame excluding BPF stack, ensure this is quadword aligned */
> +#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
>  				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
> 
>  #ifndef __ASSEMBLY__
> @@ -103,6 +103,7 @@ struct codegen_context {
>  	 */
>  	unsigned int seen;
>  	unsigned int idx;
> +	unsigned int stack_size;
>  };
> 
>  #endif /* !__ASSEMBLY__ */
> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
> index 6ba5d253e857..a01362c88f6a 100644
> --- a/arch/powerpc/net/bpf_jit_comp64.c
> +++ b/arch/powerpc/net/bpf_jit_comp64.c
> @@ -69,7 +69,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
>  static int bpf_jit_stack_local(struct codegen_context *ctx)
>  {
>  	if (bpf_has_stack_frame(ctx))
> -		return STACK_FRAME_MIN_SIZE + MAX_BPF_STACK;
> +		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
>  	else
>  		return -(BPF_PPC_STACK_SAVE + 16);
>  }
> @@ -82,8 +82,9 @@ static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
>  static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
>  {
>  	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
> -		return (bpf_has_stack_frame(ctx) ? BPF_PPC_STACKFRAME : 0)
> -							- (8 * (32 - reg));
> +		return (bpf_has_stack_frame(ctx) ?
> +			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
> +				- (8 * (32 - reg));
> 
>  	pr_err("BPF JIT is asking about unknown registers");
>  	BUG();
> @@ -134,7 +135,7 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
>  			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
>  		}
> 
> -		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
> +		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
>  	}
> 
>  	/*
> @@ -161,7 +162,7 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
>  	/* Setup frame pointer to point to the bpf stack area */
>  	if (bpf_is_seen_register(ctx, BPF_REG_FP))
>  		PPC_ADDI(b2p[BPF_REG_FP], 1,
> -				STACK_FRAME_MIN_SIZE + MAX_BPF_STACK);
> +				STACK_FRAME_MIN_SIZE + ctx->stack_size);
>  }
> 
>  static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
> @@ -183,7 +184,7 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
> 
>  	/* Tear down our stack frame */
>  	if (bpf_has_stack_frame(ctx)) {
> -		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
> +		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
>  		if (ctx->seen & SEEN_FUNC) {
>  			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
>  			PPC_MTLR(0);
> @@ -993,6 +994,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
> 
>  	memset(&cgctx, 0, sizeof(struct codegen_context));
> 
> +	/* Make sure that the stack is quadword aligned. */
> +	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
> +
>  	/* Scouting faux-generate pass 0 */
>  	if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
>  		/* We hit something illegal or unsupported. */
> -- 
> 2.13.5
> 


* Re: [PATCH 1/1] bpf: take advantage of stack_depth tracking in powerpc JIT
From: Daniel Borkmann @ 2017-09-01 19:15 UTC (permalink / raw)
  To: Sandipan Das, mpe; +Cc: naveen.n.rao, ast, netdev, linuxppc-dev

On 09/01/2017 08:53 PM, Sandipan Das wrote:
> Take advantage of stack_depth tracking, originally introduced for
> x64, in the powerpc JIT as well. Round up the allocated stack to a
> multiple of 16 bytes so that it stays quadword aligned for functions
> called from the JITed bpf program.
>
> Signed-off-by: Sandipan Das <sandipan@linux.vnet.ibm.com>

Awesome, thanks for following up! :)


* Re: [1/1] bpf: take advantage of stack_depth tracking in powerpc JIT
From: Michael Ellerman @ 2017-11-07 23:30 UTC (permalink / raw)
  To: Sandipan Das; +Cc: netdev, naveen.n.rao, linuxppc-dev, daniel, ast

On Fri, 2017-09-01 at 18:53:01 UTC, Sandipan Das wrote:
> Take advantage of stack_depth tracking, originally introduced for
> x64, in the powerpc JIT as well. Round up the allocated stack to a
> multiple of 16 bytes so that it stays quadword aligned for functions
> called from the JITed bpf program.
> 
> Signed-off-by: Sandipan Das <sandipan@linux.vnet.ibm.com>
> Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>

Applied to powerpc next, thanks.

https://git.kernel.org/powerpc/c/ac0761ebcb08830d8f64b9181f6736

cheers

