From: Brian Gerst <brgerst@gmail.com>
To: linux-kernel@vger.kernel.org, x86@kernel.org
Cc: Ingo Molnar <mingo@kernel.org>,
Thomas Gleixner <tglx@linutronix.de>,
Borislav Petkov <bp@alien8.de>, "H . Peter Anvin" <hpa@zytor.com>,
Uros Bizjak <ubizjak@gmail.com>,
David.Laight@aculab.com, Brian Gerst <brgerst@gmail.com>
Subject: [PATCH v4 08/16] x86/stackprotector/64: Convert to normal percpu variable
Date: Fri, 22 Mar 2024 12:52:25 -0400 [thread overview]
Message-ID: <20240322165233.71698-9-brgerst@gmail.com> (raw)
In-Reply-To: <20240322165233.71698-1-brgerst@gmail.com>
Older versions of GCC fixed the location of the stack protector canary
at %gs:40. This constraint forced the percpu section to be linked at
virtual address 0 so that the canary could be the first data object in
the percpu section. Supporting the zero-based percpu section requires
additional code to handle relocations for RIP-relative references to
percpu data, extra complexity to kallsyms, and workarounds for linker
bugs due to the use of absolute symbols.
Use compiler options to redefine the stack protector location if
supported, otherwise use objtool. This will remove the constraint that
the percpu section must be zero-based.
Signed-off-by: Brian Gerst <brgerst@gmail.com>
---
arch/x86/Kconfig | 11 ++++----
arch/x86/Makefile | 21 ++++++++++------
arch/x86/entry/entry_64.S | 2 +-
arch/x86/include/asm/processor.h | 16 ++----------
arch/x86/include/asm/stackprotector.h | 36 ++++-----------------------
arch/x86/kernel/asm-offsets_64.c | 6 -----
arch/x86/kernel/cpu/common.c | 5 +---
arch/x86/kernel/head_64.S | 3 +--
arch/x86/xen/xen-head.S | 3 +--
9 files changed, 30 insertions(+), 73 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 121cfb9ffc0e..3dbefdb8a5d6 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -271,7 +271,7 @@ config X86
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_SETUP_PER_CPU_AREA
select HAVE_SOFTIRQ_ON_OWN_STACK
- select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
+ select HAVE_STACKPROTECTOR if X86_64 || CC_HAS_SANE_STACKPROTECTOR
select HAVE_STACK_VALIDATION if HAVE_OBJTOOL
select HAVE_STATIC_CALL
select HAVE_STATIC_CALL_INLINE if HAVE_OBJTOOL
@@ -411,15 +411,14 @@ config PGTABLE_LEVELS
config CC_HAS_SANE_STACKPROTECTOR
bool
- default y if 64BIT
+ default $(cc-option,-mstack-protector-guard-reg=gs -mstack-protector-guard-symbol=__stack_chk_guard) if 64BIT
default $(cc-option,-mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard)
- help
- We have to make sure stack protector is unconditionally disabled if
- the compiler does not allow control of the segment and symbol.
config STACKPROTECTOR_OBJTOOL
bool
- default n
+ depends on X86_64 && STACKPROTECTOR
+ default !CC_HAS_SANE_STACKPROTECTOR
+ prompt "Debug objtool stack protector conversion" if CC_HAS_SANE_STACKPROTECTOR && DEBUG_KERNEL
menu "Processor type and features"
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 662d9d4033e6..2a3ba1abb802 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -116,13 +116,7 @@ ifeq ($(CONFIG_X86_32),y)
# temporary until string.h is fixed
KBUILD_CFLAGS += -ffreestanding
- ifeq ($(CONFIG_STACKPROTECTOR),y)
- ifeq ($(CONFIG_SMP),y)
- KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
- else
- KBUILD_CFLAGS += -mstack-protector-guard=global
- endif
- endif
+ percpu_seg := fs
else
BITS := 64
UTS_MACHINE := x86_64
@@ -172,6 +166,19 @@ else
KBUILD_CFLAGS += -mcmodel=kernel
KBUILD_RUSTFLAGS += -Cno-redzone=y
KBUILD_RUSTFLAGS += -Ccode-model=kernel
+
+ percpu_seg := gs
+endif
+
+ifeq ($(CONFIG_STACKPROTECTOR),y)
+ ifneq ($(CONFIG_STACKPROTECTOR_OBJTOOL),y)
+ ifeq ($(CONFIG_SMP),y)
+ KBUILD_CFLAGS += -mstack-protector-guard-reg=$(percpu_seg)
+ KBUILD_CFLAGS += -mstack-protector-guard-symbol=__stack_chk_guard
+ else
+ KBUILD_CFLAGS += -mstack-protector-guard=global
+ endif
+ endif
endif
#
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 8af2a26b24f6..9478ff768dd0 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -191,7 +191,7 @@ SYM_FUNC_START(__switch_to_asm)
#ifdef CONFIG_STACKPROTECTOR
movq TASK_stack_canary(%rsi), %rbx
- movq %rbx, PER_CPU_VAR(fixed_percpu_data + FIXED_stack_canary)
+ movq %rbx, PER_CPU_VAR(__stack_chk_guard)
#endif
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 89ed5237e79f..946bebce396f 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -387,16 +387,8 @@ struct irq_stack {
#ifdef CONFIG_X86_64
struct fixed_percpu_data {
- /*
- * GCC hardcodes the stack canary as %gs:40. Since the
- * irq_stack is the object at %gs:0, we reserve the bottom
- * 48 bytes of the irq stack for the canary.
- *
- * Once we are willing to require -mstack-protector-guard-symbol=
- * support for x86_64 stackprotector, we can get rid of this.
- */
char gs_base[40];
- unsigned long stack_canary;
+ unsigned long reserved;
};
DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
@@ -411,11 +403,7 @@ extern asmlinkage void entry_SYSCALL32_ignore(void);
/* Save actual FS/GS selectors and bases to current->thread */
void current_save_fsgs(void);
-#else /* X86_64 */
-#ifdef CONFIG_STACKPROTECTOR
-DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
-#endif
-#endif /* !X86_64 */
+#endif /* X86_64 */
struct perf_event;
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 00473a650f51..d43fb589fcf6 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -2,26 +2,10 @@
/*
* GCC stack protector support.
*
- * Stack protector works by putting predefined pattern at the start of
+ * Stack protector works by putting a predefined pattern at the start of
* the stack frame and verifying that it hasn't been overwritten when
- * returning from the function. The pattern is called stack canary
- * and unfortunately gcc historically required it to be at a fixed offset
- * from the percpu segment base. On x86_64, the offset is 40 bytes.
- *
- * The same segment is shared by percpu area and stack canary. On
- * x86_64, percpu symbols are zero based and %gs (64-bit) points to the
- * base of percpu area. The first occupant of the percpu area is always
- * fixed_percpu_data which contains stack_canary at the appropriate
- * offset. On x86_32, the stack canary is just a regular percpu
- * variable.
- *
- * Putting percpu data in %fs on 32-bit is a minor optimization compared to
- * using %gs. Since 32-bit userspace normally has %fs == 0, we are likely
- * to load 0 into %fs on exit to usermode, whereas with percpu data in
- * %gs, we are likely to load a non-null %gs on return to user mode.
- *
- * Once we are willing to require GCC 8.1 or better for 64-bit stackprotector
- * support, we can remove some of this complexity.
+ * returning from the function. The pattern is called the stack canary
+ * and is a unique value for each task.
*/
#ifndef _ASM_STACKPROTECTOR_H
@@ -36,6 +20,8 @@
#include <linux/sched.h>
+DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
+
/*
* Initialize the stackprotector canary value.
*
@@ -51,25 +37,13 @@ static __always_inline void boot_init_stack_canary(void)
{
unsigned long canary = get_random_canary();
-#ifdef CONFIG_X86_64
- BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
-#endif
-
current->stack_canary = canary;
-#ifdef CONFIG_X86_64
- this_cpu_write(fixed_percpu_data.stack_canary, canary);
-#else
this_cpu_write(__stack_chk_guard, canary);
-#endif
}
static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
{
-#ifdef CONFIG_X86_64
- per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
-#else
per_cpu(__stack_chk_guard, cpu) = idle->stack_canary;
-#endif
}
#else /* STACKPROTECTOR */
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index bb65371ea9df..590b6cd0eac0 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -54,11 +54,5 @@ int main(void)
BLANK();
#undef ENTRY
- BLANK();
-
-#ifdef CONFIG_STACKPROTECTOR
- OFFSET(FIXED_stack_canary, fixed_percpu_data, stack_canary);
- BLANK();
-#endif
return 0;
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9a34651d24e7..f49e8f5b858d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2063,16 +2063,13 @@ void syscall_init(void)
if (!cpu_feature_enabled(X86_FEATURE_FRED))
idt_syscall_init();
}
-
-#else /* CONFIG_X86_64 */
+#endif /* CONFIG_X86_64 */
#ifdef CONFIG_STACKPROTECTOR
DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif
-#endif /* CONFIG_X86_64 */
-
/*
* Clear all 6 debug registers:
*/
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index b11526869a40..cfbf0486d424 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -361,8 +361,7 @@ SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
/* Set up %gs.
*
- * The base of %gs always points to fixed_percpu_data. If the
- * stack protector canary is enabled, it is located at %gs:40.
+ * The base of %gs always points to fixed_percpu_data.
* Note that, on SMP, the boot cpu uses init data section until
* the per cpu areas are set up.
*/
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 758bcd47b72d..ae4672ea00bb 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -53,8 +53,7 @@ SYM_CODE_START(startup_xen)
/* Set up %gs.
*
- * The base of %gs always points to fixed_percpu_data. If the
- * stack protector canary is enabled, it is located at %gs:40.
+ * The base of %gs always points to fixed_percpu_data.
* Note that, on SMP, the boot cpu uses init data section until
* the per cpu areas are set up.
*/
--
2.44.0
next prev parent reply other threads:[~2024-03-22 16:52 UTC|newest]
Thread overview: 42+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-03-22 16:52 [PATCH v4 00/16] x86-64: Stack protector and percpu improvements Brian Gerst
2024-03-22 16:52 ` [PATCH v4 01/16] x86/stackprotector/32: Remove stack protector test script Brian Gerst
2024-03-23 17:00 ` Uros Bizjak
2024-03-22 16:52 ` [PATCH v4 02/16] x86/stackprotector/64: " Brian Gerst
2024-03-23 17:01 ` Uros Bizjak
2024-03-22 16:52 ` [PATCH v4 03/16] x86/boot: Disable stack protector for early boot code Brian Gerst
2024-03-22 16:52 ` [PATCH v4 04/16] x86/pvh: Use fixed_percpu_data for early boot GSBASE Brian Gerst
2024-03-22 16:52 ` [PATCH v4 05/16] x86/relocs: Handle R_X86_64_REX_GOTPCRELX relocations Brian Gerst
2024-03-22 16:52 ` [PATCH v4 06/16] objtool: Allow adding relocations to an existing section Brian Gerst
2024-03-22 16:52 ` [PATCH v4 07/16] objtool: Convert fixed location stack protector accesses Brian Gerst
2024-03-22 16:52 ` Brian Gerst [this message]
2024-03-23 17:11 ` [PATCH v4 08/16] x86/stackprotector/64: Convert to normal percpu variable Uros Bizjak
2024-03-22 16:52 ` [PATCH v4 09/16] x86/percpu/64: Use relative percpu offsets Brian Gerst
2024-03-23 17:14 ` Uros Bizjak
2024-03-22 16:52 ` [PATCH v4 10/16] x86/percpu/64: Remove fixed_percpu_data Brian Gerst
2024-03-23 17:14 ` Uros Bizjak
2024-03-22 16:52 ` [PATCH v4 11/16] x86/boot/64: Remove inverse relocations Brian Gerst
2024-03-22 16:52 ` [PATCH v4 12/16] x86/percpu/64: Remove INIT_PER_CPU macros Brian Gerst
2024-03-23 17:15 ` Uros Bizjak
2024-03-22 16:52 ` [PATCH v4 13/16] percpu: Remove PER_CPU_FIRST_SECTION Brian Gerst
2024-03-23 17:17 ` Uros Bizjak
2024-03-22 16:52 ` [PATCH v4 14/16] percpu: Remove PERCPU_VADDR() Brian Gerst
2024-03-22 16:52 ` [PATCH v4 15/16] percpu: Remove __per_cpu_load Brian Gerst
2024-03-22 16:52 ` [PATCH v4 16/16] kallsyms: Remove KALLSYMS_ABSOLUTE_PERCPU Brian Gerst
2024-03-23 11:39 ` [PATCH v4 00/16] x86-64: Stack protector and percpu improvements Uros Bizjak
2024-03-23 13:22 ` Brian Gerst
2024-03-23 16:16 ` Linus Torvalds
2024-03-23 17:06 ` Linus Torvalds
2024-03-24 19:09 ` David Laight
2024-03-25 14:51 ` Arnd Bergmann
2024-03-25 15:26 ` Takashi Iwai
2024-03-25 18:08 ` Arnd Bergmann
2024-03-26 7:02 ` Uros Bizjak
2024-03-23 22:55 ` Arnd Bergmann
2024-03-25 15:14 ` Ard Biesheuvel
2024-03-24 2:25 ` Ingo Molnar
2024-03-24 3:51 ` Brian Gerst
2024-03-24 4:05 ` Ingo Molnar
2024-03-24 5:43 ` Brian Gerst
2024-03-24 10:53 ` Ingo Molnar
2024-03-24 12:34 ` Brian Gerst
2024-03-24 18:14 ` Ingo Molnar
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240322165233.71698-9-brgerst@gmail.com \
--to=brgerst@gmail.com \
--cc=David.Laight@aculab.com \
--cc=bp@alien8.de \
--cc=hpa@zytor.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@kernel.org \
--cc=tglx@linutronix.de \
--cc=ubizjak@gmail.com \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox