* [RFC PATCH v2] arm64: ftrace: Add shadow call stack protection
From: Ard Biesheuvel @ 2022-12-01 18:17 UTC
To: linux-arm-kernel; +Cc: Ard Biesheuvel, Mark Rutland, Kees Cook
The low-level ftrace code performs some manipulations of the return
address that might be vulnerable to abuse as a gadget. We'd prefer to
protect this code using PAC where we can, but because that breaks
pointer equality in ways that may interfere with the operation of
ftrace in particular, or the backtrace code in general, it needs some
more careful thought.
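For illustration only (this sequence is not part of the patch):
PAC-based protection would sign the return address when it is captured
and authenticate it before it is used again, along the lines of

	paciasp			// sign x30, using SP as the modifier
	...			// x30 now carries PAC bits
	autiasp			// authenticate x30, stripping the PAC bits
	ret

but the signed value that ends up in memory no longer compares equal
to the bare address, which defeats checks such as the cmp against
return_to_handler() that the hunks below rely on.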
In the meantime, let's make the ftrace_caller() and return_to_handler()
routines a bit more robust when shadow call stacks are enabled, by
shadowing the return addresses that are captured and potentially
manipulated by ftrace.
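As a reminder of the underlying mechanism: with
CONFIG_SHADOW_CALL_STACK, the shadow call stack pointer lives in x18
(exposed as scs_sp by asm/scs.h), and an instrumented function saves
and restores its return address on that stack, roughly like this
minimal sketch:

func:
	str	x30, [scs_sp], #8	// push LR onto the shadow stack
	...				// function body
	ldr	x30, [scs_sp, #-8]!	// pop LR back into x30 before returning
	ret

The hunks below apply the same idiom to the ftrace trampolines, which
have to juggle two return addresses at once: the callsite's LR in x9
and their own in x30.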
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
This supersedes "[PATCH 4/4] arm64: ftrace: Add return address
protection" sent out on the 29th of November.
arch/arm64/kernel/entry-ftrace.S | 27 ++++++++++++++++++++
1 file changed, 27 insertions(+)
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 30cc2a9d1757a6a7..4ce262e7d9456761 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -12,6 +12,7 @@
 #include <asm/assembler.h>
 #include <asm/ftrace.h>
 #include <asm/insn.h>
+#include <asm/scs.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
 /*
@@ -36,6 +37,11 @@
 SYM_CODE_START(ftrace_caller)
 	bti	c
 
+#if defined(CONFIG_SHADOW_CALL_STACK) && !defined(CONFIG_DYNAMIC_SCS)
+	/* Push the callsite's LR and the current LR to the shadow stack */
+	stp	x9, x30, [scs_sp], #16
+#endif
+
 	/* Save original SP */
 	mov	x10, sp
 
@@ -93,6 +99,24 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 	/* Restore the callsite's SP */
 	add	sp, sp, #FREGS_SIZE + 32
 
+#if defined(CONFIG_SHADOW_CALL_STACK) && !defined(CONFIG_DYNAMIC_SCS)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/*
+	 * The callsite's LR will be popped from the shadow call stack in
+	 * return_to_handler() if a return via that function was inserted into
+	 * the call stack by ftrace. That means we should leave the callsite's
+	 * LR on the shadow call stack, and only pop the return address that
+	 * takes us back to the callsite.
+	 */
+	adr	x10, return_to_handler
+	cmp	x10, x30
+	b.ne	0f
+	ldr	x9, [scs_sp, #-8]!
+	ret	x9
+#endif
+	/* Pop our return address and the callsite's LR from the shadow stack */
+0:	ldp	x30, x9, [scs_sp, #-16]!
+#endif
 	ret	x9
 SYM_CODE_END(ftrace_caller)
 
@@ -265,6 +289,9 @@ SYM_CODE_START(return_to_handler)
 	ldp	x6, x7, [sp, #48]
 	add	sp, sp, #64
 
+#if defined(CONFIG_SHADOW_CALL_STACK) && !defined(CONFIG_DYNAMIC_SCS)
+	ldr	x30, [scs_sp, #-8]!
+#endif
 	ret
 SYM_CODE_END(return_to_handler)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
--
2.35.1
* Re: [RFC PATCH v2] arm64: ftrace: Add shadow call stack protection
From: Ard Biesheuvel @ 2022-12-09 15:12 UTC
To: linux-arm-kernel; +Cc: Mark Rutland, Kees Cook
On Thu, 1 Dec 2022 at 19:17, Ard Biesheuvel <ardb@kernel.org> wrote:
>
> The low-level ftrace code performs some manipulations of the return
> address that might be vulnerable to abuse as a gadget. We'd prefer to
> protect this code using PAC where we can, but due to the fact that this
> breaks pointer equality in ways that may interfere with the operation of
> ftrace in particular or the backtrace code in general, this needs some
> more careful thought.
>
> In the meantime, let's make the ftrace_caller() and return_to_handler()
> routines a bit more robust when shadow call stacks are enabled, by
> shadowing the return addresses that are captured and potentially
> manipulated by ftrace.
>
> Cc: Mark Rutland <mark.rutland@arm.com>
> Cc: Kees Cook <keescook@chromium.org>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> This supersedes "[PATCH 4/4] arm64: ftrace: Add return address
> protection" sent out on the 29th of November.
>
Please disregard - better to fix this comprehensively, and I'm about
to send out a v3 of the original series.