From: Ard Biesheuvel <ardb@kernel.org>
To: linux-efi@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, Ard Biesheuvel <ardb@kernel.org>,
Evgeniy Baskov <baskov@ispras.ru>, Borislav Petkov <bp@alien8.de>,
Andy Lutomirski <luto@kernel.org>,
Dave Hansen <dave.hansen@linux.intel.com>,
Ingo Molnar <mingo@redhat.com>,
Peter Zijlstra <peterz@infradead.org>,
Thomas Gleixner <tglx@linutronix.de>,
Alexey Khoroshilov <khoroshilov@ispras.ru>,
Peter Jones <pjones@redhat.com>,
Gerd Hoffmann <kraxel@redhat.com>, Dave Young <dyoung@redhat.com>,
Mario Limonciello <mario.limonciello@amd.com>,
Kees Cook <keescook@chromium.org>,
Tom Lendacky <thomas.lendacky@amd.com>,
"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
Linus Torvalds <torvalds@linux-foundation.org>,
Joerg Roedel <jroedel@suse.de>
Subject: [PATCH v7 11/22] x86/decompressor: Avoid the need for a stack in the 32-bit trampoline
Date: Fri, 28 Jul 2023 11:09:05 +0200 [thread overview]
Message-ID: <20230728090916.1538550-12-ardb@kernel.org> (raw)
In-Reply-To: <20230728090916.1538550-1-ardb@kernel.org>
The 32-bit trampoline no longer uses the stack for anything except
performing a far return back to long mode. Currently, this stack is
placed in the same page that carries the trampoline code, which means
this page must be mapped writable and executable, and the stack is
therefore executable as well.
Replace the far return with a far jump, so that the return address can
be pre-calculated and patched into the code before it is called. This
removes the need for a stack entirely, and in a later patch, this will
be taken advantage of by removing writable permissions from (and adding
executable permissions to) this code page explicitly when booting via
the EFI stub.
Not touching the stack pointer also makes it more straightforward to
call the trampoline code as an ordinary 64-bit function from C code.
Note that we need to preserve the value of RSP across the switch into
compatibility mode: the stack pointer may get truncated to 32 bits.
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
arch/x86/boot/compressed/head_64.S | 64 ++++++++++----------
arch/x86/boot/compressed/pgtable.h | 4 +-
arch/x86/boot/compressed/pgtable_64.c | 12 +++-
3 files changed, 44 insertions(+), 36 deletions(-)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 491d985be75fd5b0..1b0c61d1b389fd37 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -449,9 +449,6 @@ SYM_CODE_START(startup_64)
leaq TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax
call *%rax
- /* Restore the stack, the 32-bit trampoline uses its own stack */
- leaq rva(boot_stack_end)(%rbx), %rsp
-
/*
* cleanup_trampoline() would restore trampoline memory.
*
@@ -537,32 +534,37 @@ SYM_FUNC_END(.Lrelocated)
* EDI contains the base address of the trampoline memory.
* Non-zero ESI means trampoline needs to enable 5-level paging.
*/
+ .section ".rodata", "a", @progbits
SYM_CODE_START(trampoline_32bit_src)
- /* Grab return address */
- movq (%rsp), %rax
-
- /* Set up 32-bit addressable stack */
- leaq TRAMPOLINE_32BIT_STACK_END(%rdi), %rsp
-
- /* Preserve return address and other live 64-bit registers */
- pushq %rax
+ /* Preserve live 64-bit registers */
pushq %r15
pushq %rbp
pushq %rbx
+ /* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
+ movq %rsp, %rbx
+ shrq $32, %rbx
+
/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
pushq $__KERNEL32_CS
leaq 0f(%rip), %rax
pushq %rax
lretq
+ /*
+ * The 32-bit code below will do a far jump back to long mode and end
+ * up here after reconfiguring the number of paging levels.
+ */
+.Lret: shlq $32, %rbx // Reconstruct stack pointer
+ orq %rbx, %rsp
+
+ popq %rbx
+ popq %rbp
+ popq %r15
+ retq
+
.code32
0:
- /* Set up data and stack segments */
- movl $__KERNEL_DS, %eax
- movl %eax, %ds
- movl %eax, %ss
-
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
@@ -617,29 +619,25 @@ SYM_CODE_START(trampoline_32bit_src)
1:
movl %eax, %cr4
- /* Calculate address of paging_enabled() once we are executing in the trampoline */
- leal .Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%edi), %eax
-
- /* Prepare the stack for far return to Long Mode */
- pushl $__KERNEL_CS
- pushl %eax
-
/* Enable paging again. */
movl %cr0, %eax
btsl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
- lret
+ /*
+ * Return to the 64-bit calling code using LJMP rather than LRET, to
+ * avoid the need for a 32-bit addressable stack. The destination
+ * address will be adjusted after the template code is copied into a
+ * 32-bit addressable buffer.
+ */
+.Ljmp: ljmpl $__KERNEL_CS, $(.Lret - trampoline_32bit_src)
SYM_CODE_END(trampoline_32bit_src)
- .code64
-SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
- /* Return from the trampoline */
- popq %rbx
- popq %rbp
- popq %r15
- retq
-SYM_FUNC_END(.Lpaging_enabled)
+/*
+ * This symbol is placed right after trampoline_32bit_src() so its address can
+ * be used to infer the size of the trampoline code.
+ */
+SYM_DATA(trampoline_ljmp_imm_offset, .word .Ljmp + 1 - trampoline_32bit_src)
/*
* The trampoline code has a size limit.
@@ -648,7 +646,7 @@ SYM_FUNC_END(.Lpaging_enabled)
*/
.org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
- .code32
+ .text
SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
1:
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
index 4e8cef135226bcbb..c6b0903aded05a07 100644
--- a/arch/x86/boot/compressed/pgtable.h
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -8,13 +8,13 @@
#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
#define TRAMPOLINE_32BIT_CODE_SIZE 0xA0
-#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
-
#ifndef __ASSEMBLER__
extern unsigned long *trampoline_32bit;
extern void trampoline_32bit_src(void *trampoline, bool enable_5lvl);
+extern const u16 trampoline_ljmp_imm_offset;
+
#endif /* __ASSEMBLER__ */
#endif /* BOOT_COMPRESSED_PAGETABLE_H */
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index 2ac12ff4111bf8c0..d66639c961b8eeda 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -109,6 +109,7 @@ static unsigned long find_trampoline_placement(void)
struct paging_config paging_prepare(void *rmode)
{
struct paging_config paging_config = {};
+ void *tramp_code;
/* Initialize boot_params. Required for cmdline_find_option_bool(). */
boot_params = rmode;
@@ -143,9 +144,18 @@ struct paging_config paging_prepare(void *rmode)
memset(trampoline_32bit, 0, TRAMPOLINE_32BIT_SIZE);
/* Copy trampoline code in place */
- memcpy(trampoline_32bit + TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long),
+ tramp_code = memcpy(trampoline_32bit +
+ TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long),
&trampoline_32bit_src, TRAMPOLINE_32BIT_CODE_SIZE);
+ /*
+ * Avoid the need for a stack in the 32-bit trampoline code, by using
+ * LJMP rather than LRET to return back to long mode. LJMP takes an
+ * immediate absolute address, which needs to be adjusted based on the
+ * placement of the trampoline.
+ */
+ *(u32 *)(tramp_code + trampoline_ljmp_imm_offset) += (unsigned long)tramp_code;
+
/*
* The code below prepares page table in trampoline memory.
*
--
2.39.2
next prev parent reply other threads:[~2023-07-28 9:14 UTC|newest]
Thread overview: 41+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-07-28 9:08 [PATCH v7 00/22] efi/x86: Avoid bare metal decompressor during EFI boot Ard Biesheuvel
2023-07-28 9:08 ` [PATCH v7 01/22] x86/decompressor: Don't rely on upper 32 bits of GPRs being preserved Ard Biesheuvel
2023-07-31 10:07 ` Borislav Petkov
2023-07-31 10:09 ` Ard Biesheuvel
2023-07-31 11:01 ` Borislav Petkov
2023-07-28 9:08 ` [PATCH v7 02/22] x86/head_64: Store boot_params pointer in callee save register Ard Biesheuvel
2023-08-01 11:28 ` Borislav Petkov
2023-07-28 9:08 ` [PATCH v7 03/22] x86/efistub: Branch straight to kernel entry point from C code Ard Biesheuvel
2023-07-28 9:08 ` [PATCH v7 04/22] x86/efistub: Simplify and clean up handover entry code Ard Biesheuvel
2023-07-28 9:08 ` [PATCH v7 05/22] x86/decompressor: Avoid magic offsets for EFI handover entrypoint Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 06/22] x86/efistub: Clear BSS in EFI handover protocol entrypoint Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 07/22] x86/decompressor: Use proper sequence to take the address of the GOT Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 08/22] x86/decompressor: Store boot_params pointer in callee save register Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 09/22] x86/decompressor: Call trampoline as a normal function Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 10/22] x86/decompressor: Use standard calling convention for trampoline Ard Biesheuvel
2023-07-31 11:28 ` Borislav Petkov
2023-07-31 11:35 ` Ard Biesheuvel
2023-07-28 9:09 ` Ard Biesheuvel [this message]
2023-08-01 10:30 ` [PATCH v7 11/22] x86/decompressor: Avoid the need for a stack in the 32-bit trampoline Borislav Petkov
2023-07-28 9:09 ` [PATCH v7 12/22] x86/decompressor: Call trampoline directly from C code Ard Biesheuvel
2023-08-01 11:45 ` Borislav Petkov
2023-08-01 11:48 ` Ard Biesheuvel
2023-08-01 11:51 ` Borislav Petkov
2023-07-28 9:09 ` [PATCH v7 13/22] x86/decompressor: Only call the trampoline when changing paging levels Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 14/22] x86/decompressor: Merge trampoline cleanup with switching code Ard Biesheuvel
2023-08-01 12:08 ` Borislav Petkov
2023-08-01 12:11 ` Ard Biesheuvel
2023-08-01 12:40 ` Borislav Petkov
2023-08-01 12:46 ` Ard Biesheuvel
2023-08-01 13:04 ` Borislav Petkov
2023-07-28 9:09 ` [PATCH v7 15/22] x86/efistub: Perform 4/5 level paging switch from the stub Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 16/22] x86/efistub: Prefer EFI memory attributes protocol over DXE services Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 17/22] decompress: Use 8 byte alignment Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 18/22] x86/decompressor: Move global symbol references to C code Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 19/22] x86/decompressor: Factor out kernel decompression and relocation Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 20/22] efi/libstub: Add limit argument to efi_random_alloc() Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 21/22] x86/efistub: Perform SNP feature test while running in the firmware Ard Biesheuvel
2023-07-28 9:09 ` [PATCH v7 22/22] x86/efistub: Avoid legacy decompressor when doing EFI boot Ard Biesheuvel
2023-08-02 10:26 ` Borislav Petkov
2023-08-02 10:47 ` Ard Biesheuvel
2023-08-02 15:55 ` Borislav Petkov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230728090916.1538550-12-ardb@kernel.org \
--to=ardb@kernel.org \
--cc=baskov@ispras.ru \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=dyoung@redhat.com \
--cc=jroedel@suse.de \
--cc=keescook@chromium.org \
--cc=khoroshilov@ispras.ru \
--cc=kirill.shutemov@linux.intel.com \
--cc=kraxel@redhat.com \
--cc=linux-efi@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=luto@kernel.org \
--cc=mario.limonciello@amd.com \
--cc=mingo@redhat.com \
--cc=peterz@infradead.org \
--cc=pjones@redhat.com \
--cc=tglx@linutronix.de \
--cc=thomas.lendacky@amd.com \
--cc=torvalds@linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox