From: Ard Biesheuvel <ardb+git@google.com>
To: linux-kernel@vger.kernel.org
Cc: Ard Biesheuvel <ardb@kernel.org>,
Kevin Loughlin <kevinloughlin@google.com>,
Tom Lendacky <thomas.lendacky@amd.com>,
Dionna Glaze <dionnaglaze@google.com>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
Andy Lutomirski <luto@kernel.org>, Arnd Bergmann <arnd@arndb.de>,
Nathan Chancellor <nathan@kernel.org>,
Nick Desaulniers <ndesaulniers@google.com>,
Justin Stitt <justinstitt@google.com>,
Brian Gerst <brgerst@gmail.com>,
linux-arch@vger.kernel.org, llvm@lists.linux.dev
Subject: [PATCH v2 13/17] x86/sev: Make all code reachable from 1:1 mapping __pitext
Date: Thu, 25 Jan 2024 12:28:32 +0100
Message-ID: <20240125112818.2016733-32-ardb+git@google.com>
In-Reply-To: <20240125112818.2016733-19-ardb+git@google.com>
From: Ard Biesheuvel <ardb@kernel.org>
We cannot safely call arbitrary code while still executing from the 1:1
mapping at early boot. The SEV init code in particular does a fair amount of
work this early, and calls into ordinary APIs, which is not safe.

So annotate all SEV code that runs this early as __pitext, along with some of
the shared code that it relies on.
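
For reference, a minimal sketch of how such annotations could be wired up,
assuming __pitext resolves to the dedicated .pi.text section introduced
earlier in this series and __PITEXT is its assembly-side counterpart (by
analogy with __INIT from <linux/init.h>). The authoritative definitions live
in that earlier patch; the names and example below are only illustrative:

  /* C side: emit the function into .pi.text instead of .text */
  #define __pitext	__section(".pi.text")

  /* Assembly side: open the same section, analogous to __INIT */
  #define __PITEXT	.section ".pi.text", "ax"

  /* Example: a helper that must remain callable from the 1:1 mapping */
  static int __pitext pi_safe_add(int a, int b)
  {
  	return a + b;
  }

With everything that can run from the 1:1 mapping grouped into .pi.text,
the modpost check added earlier in the series can warn about calls from
__pitext into ordinary text sections.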
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
arch/x86/boot/compressed/sev.c | 3 ++
arch/x86/include/asm/mem_encrypt.h | 8 +--
arch/x86/include/asm/pgtable.h | 6 +--
arch/x86/include/asm/sev.h | 6 +--
arch/x86/kernel/sev-shared.c | 26 +++++-----
arch/x86/kernel/sev.c | 14 +++---
arch/x86/lib/cmdline.c | 6 +--
arch/x86/lib/memcpy_64.S | 3 +-
arch/x86/lib/memset_64.S | 3 +-
arch/x86/mm/mem_encrypt_boot.S | 3 +-
arch/x86/mm/mem_encrypt_identity.c | 52 ++++++++++++++------
arch/x86/mm/pti.c | 2 +-
12 files changed, 81 insertions(+), 51 deletions(-)
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 454acd7a2daf..22b9de2724f7 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -116,6 +116,9 @@ static bool fault_in_kernel_space(unsigned long address)
#undef __init
#define __init
+#undef __pitext
+#define __pitext
+
#define __BOOT_COMPRESSED
/* Basic instruction decoding support needed */
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 359ada486fa9..48469e22a75e 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -46,8 +46,8 @@ void __init sme_unmap_bootdata(char *real_mode_data);
void __init sme_early_init(void);
-void __init sme_encrypt_kernel(struct boot_params *bp);
-void __init sme_enable(struct boot_params *bp);
+void sme_encrypt_kernel(struct boot_params *bp);
+void sme_enable(struct boot_params *bp);
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
@@ -75,8 +75,8 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
static inline void __init sme_early_init(void) { }
-static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
-static inline void __init sme_enable(struct boot_params *bp) { }
+static inline void sme_encrypt_kernel(struct boot_params *bp) { }
+static inline void sme_enable(struct boot_params *bp) { }
static inline void sev_es_init_vc_handling(void) { }
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 9d077bca6a10..8f45255a8e32 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1412,7 +1412,7 @@ extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
* Returns true for parts of the PGD that map userspace and
* false for the parts that map the kernel.
*/
-static inline bool pgdp_maps_userspace(void *__ptr)
+static __always_inline bool pgdp_maps_userspace(void *__ptr)
{
unsigned long ptr = (unsigned long)__ptr;
@@ -1435,7 +1435,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
* This generates better code than the inline assembly in
* __set_bit().
*/
-static inline void *ptr_set_bit(void *ptr, int bit)
+static __always_inline void *ptr_set_bit(void *ptr, int bit)
{
unsigned long __ptr = (unsigned long)ptr;
@@ -1450,7 +1450,7 @@ static inline void *ptr_clear_bit(void *ptr, int bit)
return (void *)__ptr;
}
-static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
+static __always_inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 5b4a1ce3d368..e3b55bd15ce1 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -201,14 +201,14 @@ struct snp_guest_request_ioctl;
void setup_ghcb(void);
void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
unsigned long npages);
-void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
- unsigned long npages);
+void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages);
void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
-void __init __noreturn snp_abort(void);
+void __noreturn snp_abort(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 snp_get_unsupported_features(u64 status);
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 1d24ec679915..b432cac19d13 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -89,7 +89,8 @@ static bool __init sev_es_check_cpu_features(void)
return true;
}
-static void __noreturn sev_es_terminate(unsigned int set, unsigned int reason)
+static void __always_inline __noreturn sev_es_terminate(unsigned int set,
+ unsigned int reason)
{
u64 val = GHCB_MSR_TERM_REQ;
@@ -222,10 +223,9 @@ static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt
return ES_VMM_ERROR;
}
-static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt,
- u64 exit_code, u64 exit_info_1,
- u64 exit_info_2)
+static enum es_result __pitext
+sev_es_ghcb_hv_call(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+ u64 exit_code, u64 exit_info_1, u64 exit_info_2)
{
/* Fill in protocol and format specifiers */
ghcb->protocol_version = ghcb_version;
@@ -241,7 +241,7 @@ static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
return verify_exception_info(ghcb, ctxt);
}
-static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
+static int __pitext __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
{
u64 val;
@@ -256,7 +256,7 @@ static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
return 0;
}
-static int __sev_cpuid_hv_msr(struct cpuid_leaf *leaf)
+static int __pitext __sev_cpuid_hv_msr(struct cpuid_leaf *leaf)
{
int ret;
@@ -391,7 +391,7 @@ static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
return xsave_size;
}
-static bool
+static bool __pitext
snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
{
const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
@@ -427,7 +427,8 @@ snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
return false;
}
-static void snp_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+static void __pitext snp_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+ struct cpuid_leaf *leaf)
{
if (sev_cpuid_hv(ghcb, ctxt, leaf))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
@@ -528,7 +529,8 @@ static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
* Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
* should be treated as fatal by caller.
*/
-static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+static int __pitext snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+ struct cpuid_leaf *leaf)
{
const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
@@ -570,7 +572,7 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
* page yet, so it only supports the MSR based communication with the
* hypervisor and only the CPUID exit-code.
*/
-void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+void __pitext do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
{
unsigned int subfn = lower_bits(regs->cx, 32);
unsigned int fn = lower_bits(regs->ax, 32);
@@ -1043,7 +1045,7 @@ static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
* mapping needs to be updated in sync with all the changes to virtual memory
* layout and related mapping facilities throughout the boot process.
*/
-static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
+static void __pitext setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
{
const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table;
int i;
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index c67285824e82..e5793505b307 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -682,8 +682,8 @@ static u64 __init get_jump_table_addr(void)
return ret;
}
-static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
- unsigned long npages, enum psc_op op)
+static void __pitext early_set_pages_state(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages, enum psc_op op)
{
unsigned long paddr_end;
u64 val;
@@ -758,8 +758,8 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
}
-void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
- unsigned long npages)
+void __pitext early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages)
{
/*
* This can be invoked in early boot while running identity mapped, so
@@ -2059,7 +2059,7 @@ bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
*
* Scan for the blob in that order.
*/
-static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
+static __pitext struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
struct cc_blob_sev_info *cc_info;
@@ -2085,7 +2085,7 @@ static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
return cc_info;
}
-bool __init snp_init(struct boot_params *bp)
+bool __pitext snp_init(struct boot_params *bp)
{
struct cc_blob_sev_info *cc_info;
@@ -2107,7 +2107,7 @@ bool __init snp_init(struct boot_params *bp)
return true;
}
-void __init __noreturn snp_abort(void)
+void __pitext __noreturn snp_abort(void)
{
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index 80570eb3c89b..9f040b2882ae 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -119,7 +119,7 @@ __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size,
* Returns the length of the argument (regardless of if it was
* truncated to fit in the buffer), or -1 on not found.
*/
-static int
+static int __pitext
__cmdline_find_option(const char *cmdline, int max_cmdline_size,
const char *option, char *buffer, int bufsize)
{
@@ -203,12 +203,12 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
return len;
}
-int cmdline_find_option_bool(const char *cmdline, const char *option)
+int __pitext cmdline_find_option_bool(const char *cmdline, const char *option)
{
return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
}
-int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
+int __pitext cmdline_find_option(const char *cmdline, const char *option, char *buffer,
int bufsize)
{
return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option,
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 0ae2e1712e2e..48b0908d2c3e 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -4,11 +4,12 @@
#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/cfi_types.h>
+#include <linux/init.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
-.section .noinstr.text, "ax"
+ __PITEXT
/*
* memcpy - Copy a memory block.
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 0199d56cb479..455424dcadc0 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -2,11 +2,12 @@
/* Copyright 2002 Andi Kleen, SuSE Labs */
#include <linux/export.h>
+#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
-.section .noinstr.text, "ax"
+ __PITEXT
/*
* ISO C memset - set a memory block to a byte value. This function uses fast
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index e25288ee33c2..f951f4f86e5c 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -7,6 +7,7 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
+#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/page.h>
@@ -14,7 +15,7 @@
#include <asm/msr-index.h>
#include <asm/nospec-branch.h>
- .text
+ __PITEXT
.code64
SYM_FUNC_START(sme_encrypt_execute)
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 67d4530548ce..20b23da4a26d 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -90,7 +90,7 @@ static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[] __initdata = "on";
static char sme_cmdline_off[] __initdata = "off";
-static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+static void __pitext sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
unsigned long pgd_start, pgd_end, pgd_size;
pgd_t *pgd_p;
@@ -105,7 +105,7 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
memset(pgd_p, 0, pgd_size);
}
-static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
+static pud_t __pitext *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
pgd_t *pgd;
p4d_t *p4d;
@@ -142,7 +142,7 @@ static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
return pud;
}
-static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+static void __pitext sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
pud_t *pud;
pmd_t *pmd;
@@ -158,7 +158,7 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}
-static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
+static void __pitext sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
pud_t *pud;
pmd_t *pmd;
@@ -184,7 +184,7 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}
-static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+static void __pitext __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
{
while (ppd->vaddr < ppd->vaddr_end) {
sme_populate_pgd_large(ppd);
@@ -194,7 +194,7 @@ static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
}
}
-static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+static void __pitext __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
{
while (ppd->vaddr < ppd->vaddr_end) {
sme_populate_pgd(ppd);
@@ -204,7 +204,7 @@ static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
}
}
-static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+static void __pitext __sme_map_range(struct sme_populate_pgd_data *ppd,
pmdval_t pmd_flags, pteval_t pte_flags)
{
unsigned long vaddr_end;
@@ -228,22 +228,22 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
__sme_map_range_pte(ppd);
}
-static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
+static void __pitext sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}
-static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
+static void __pitext sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}
-static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
+static void __pitext sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}
-static unsigned long __init sme_pgtable_calc(unsigned long len)
+static unsigned long __pitext sme_pgtable_calc(unsigned long len)
{
unsigned long entries = 0, tables = 0;
@@ -280,7 +280,7 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
return entries + tables;
}
-void __init sme_encrypt_kernel(struct boot_params *bp)
+void __pitext sme_encrypt_kernel(struct boot_params *bp)
{
unsigned long workarea_start, workarea_end, workarea_len;
unsigned long execute_start, execute_end, execute_len;
@@ -493,7 +493,29 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
native_write_cr3(__native_read_cr3());
}
-void __init sme_enable(struct boot_params *bp)
+/**
+ * strncmp - Compare two length-limited strings
+ * @cs: One string
+ * @ct: Another string
+ * @count: The maximum number of bytes to compare
+ */
+static int __pitext __strncmp(const char *cs, const char *ct, size_t count)
+{
+ unsigned char c1, c2;
+
+ while (count) {
+ c1 = *cs++;
+ c2 = *ct++;
+ if (c1 != c2)
+ return c1 < c2 ? -1 : 1;
+ if (!c1)
+ break;
+ count--;
+ }
+ return 0;
+}
+
+void __pitext sme_enable(struct boot_params *bp)
{
const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
unsigned int eax, ebx, ecx, edx;
@@ -594,9 +616,9 @@ void __init sme_enable(struct boot_params *bp)
if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
return;
- if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
+ if (!__strncmp(buffer, cmdline_on, sizeof(buffer)))
sme_me_mask = me_mask;
- else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
+ else if (!__strncmp(buffer, cmdline_off, sizeof(buffer)))
sme_me_mask = 0;
else
sme_me_mask = active_by_default ? me_mask : 0;
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 669ba1c345b3..8fd1b84ab40c 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -121,7 +121,7 @@ static int __init pti_parse_cmdline_nopti(char *arg)
}
early_param("nopti", pti_parse_cmdline_nopti);
-pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
+pgd_t __pitext __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
/*
* Changes to the high (kernel) portion of the kernelmode page
--
2.43.0.429.g432eaa2c6b-goog