From: Ard Biesheuvel <ardb+git@google.com>
To: linux-kernel@vger.kernel.org
Cc: linux-efi@vger.kernel.org, x86@kernel.org,
Ard Biesheuvel <ardb@kernel.org>, Borislav Petkov <bp@alien8.de>,
Ingo Molnar <mingo@kernel.org>,
Dionna Amalie Glaze <dionnaglaze@google.com>,
Kevin Loughlin <kevinloughlin@google.com>,
Tom Lendacky <thomas.lendacky@amd.com>,
Josh Poimboeuf <jpoimboe@kernel.org>,
Peter Zijlstra <peterz@infradead.org>
Subject: [PATCH v4 06/24] x86/sev: Avoid global variable to store virtual address of SVSM area
Date: Wed, 9 Jul 2025 10:08:47 +0200
Message-ID: <20250709080840.2233208-32-ardb+git@google.com>
In-Reply-To: <20250709080840.2233208-26-ardb+git@google.com>
From: Ard Biesheuvel <ardb@kernel.org>
The boot-time SVSM calling area is used both by the startup code running
from a 1:1 mapping, and potentially later on by code running from the
ordinary kernel virtual mapping.
This SVSM calling area is statically allocated, so its physical address
never changes. Its virtual address, however, depends on the calling
context (1:1 mapping or kernel virtual mapping), and even though the
variable holding that virtual address gets updated from the 1:1 address
to the kernel virtual address during boot, it is hard to reason about
why this is guaranteed to be safe.
So instead, take the RIP-relative address of the boot-time SVSM calling
area whenever its virtual address is required, and use a global
variable only for the physical address.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
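As a minimal sketch of the addressing pattern in question (illustrative
only: boot_svsm_ca_page and rip_rel_ptr() exist in the tree, but the
boot_caa() helper and the declaration details here are hypothetical, not
copied from the patch):

/*
 * Illustrative sketch only. A statically allocated object has a single,
 * fixed physical address, but two possible virtual addresses during
 * boot: its 1:1 address and its kernel virtual address. A RIP-relative
 * reference resolves to the right one in whichever mapping the code is
 * executing from, so no writable pointer variable needs to be updated
 * when the boot CPU moves from the 1:1 mapping to the kernel mapping.
 */
static struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);

u64 boot_svsm_caa_pa __ro_after_init;	/* PA is mapping independent */

static struct svsm_ca *boot_caa(void)	/* hypothetical helper */
{
	return rip_rel_ptr(&boot_svsm_ca_page);
}

Only the physical address still warrants a global, since it is handed
to the SVSM as a GPA rather than dereferenced by the kernel.
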
 arch/x86/boot/compressed/sev.c      |  5 ++---
 arch/x86/boot/startup/sev-shared.c  |  6 ------
 arch/x86/boot/startup/sev-startup.c |  4 ++--
 arch/x86/coco/sev/core.c            |  9 ---------
 arch/x86/include/asm/sev-internal.h |  3 +--
 arch/x86/include/asm/sev.h          |  2 --
 arch/x86/mm/mem_encrypt_amd.c       |  6 ------
 7 files changed, 5 insertions(+), 30 deletions(-)
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 3628e9bddc6a..6c0f91d38595 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -37,12 +37,12 @@ struct ghcb *boot_ghcb;
 
 #define __BOOT_COMPRESSED
 
-extern struct svsm_ca *boot_svsm_caa;
 extern u64 boot_svsm_caa_pa;
 
 struct svsm_ca *svsm_get_caa(void)
 {
-	return boot_svsm_caa;
+	/* The decompressor is mapped 1:1 so VA == PA */
+	return (struct svsm_ca *)boot_svsm_caa_pa;
 }
 
 u64 svsm_get_caa_pa(void)
@@ -530,7 +530,6 @@ bool early_is_sevsnp_guest(void)
 
 		/* Obtain the address of the calling area to use */
 		boot_rdmsr(MSR_SVSM_CAA, &m);
-		boot_svsm_caa = (void *)m.q;
 		boot_svsm_caa_pa = m.q;
 
 		/*
diff --git a/arch/x86/boot/startup/sev-shared.c b/arch/x86/boot/startup/sev-shared.c
index 7d2415d690c9..7ca59038269f 100644
--- a/arch/x86/boot/startup/sev-shared.c
+++ b/arch/x86/boot/startup/sev-shared.c
@@ -26,7 +26,6 @@
  * early boot, both with identity mapped virtual addresses and proper kernel
  * virtual addresses.
  */
-struct svsm_ca *boot_svsm_caa __ro_after_init;
 u64 boot_svsm_caa_pa __ro_after_init;
 
 /*
@@ -697,11 +696,6 @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info,
 	if (caa & (PAGE_SIZE - 1))
 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CAA);
 
-	/*
-	 * The CA is identity mapped when this routine is called, both by the
-	 * decompressor code and the early kernel code.
-	 */
-	boot_svsm_caa = (struct svsm_ca *)caa;
 	boot_svsm_caa_pa = caa;
 
 	/* Advertise the SVSM presence via CPUID. */
diff --git a/arch/x86/boot/startup/sev-startup.c b/arch/x86/boot/startup/sev-startup.c
index 050e071504af..8edf1ba78a48 100644
--- a/arch/x86/boot/startup/sev-startup.c
+++ b/arch/x86/boot/startup/sev-startup.c
@@ -252,6 +252,7 @@ static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
 
 static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
 {
+	struct snp_secrets_page *secrets = (void *)cc_info->secrets_phys;
 	struct svsm_call call = {};
 	int ret;
 	u64 pa;
@@ -280,7 +281,7 @@ static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
 	 * RAX = 0 (Protocol=0, CallID=0)
 	 * RCX = New CA GPA
 	 */
-	call.caa = svsm_get_caa();
+	call.caa = (struct svsm_ca *)secrets->svsm_caa;
 	call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
 	call.rcx = pa;
 	do {
@@ -289,7 +290,6 @@ static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
 	if (ret)
 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);
 
-	boot_svsm_caa = (struct svsm_ca *)pa;
 	boot_svsm_caa_pa = pa;
 }
 
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index 15be9e52848d..bea67d017bf0 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -1643,15 +1643,6 @@ void sev_show_status(void)
 	pr_cont("\n");
 }
 
-void __init snp_update_svsm_ca(void)
-{
-	if (!snp_vmpl)
-		return;
-
-	/* Update the CAA to a proper kernel address */
-	boot_svsm_caa = &boot_svsm_ca_page;
-}
-
 #ifdef CONFIG_SYSFS
 static ssize_t vmpl_show(struct kobject *kobj,
 			 struct kobj_attribute *attr, char *buf)
diff --git a/arch/x86/include/asm/sev-internal.h b/arch/x86/include/asm/sev-internal.h
index 6199b35a82e4..ffe4755962fe 100644
--- a/arch/x86/include/asm/sev-internal.h
+++ b/arch/x86/include/asm/sev-internal.h
@@ -60,7 +60,6 @@ void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 DECLARE_PER_CPU(struct svsm_ca *, svsm_caa);
 DECLARE_PER_CPU(u64, svsm_caa_pa);
 
-extern struct svsm_ca *boot_svsm_caa;
 extern u64 boot_svsm_caa_pa;
 
 static __always_inline struct svsm_ca *svsm_get_caa(void)
@@ -68,7 +67,7 @@ static __always_inline struct svsm_ca *svsm_get_caa(void)
 	if (sev_cfg.use_cas)
 		return this_cpu_read(svsm_caa);
 	else
-		return boot_svsm_caa;
+		return rip_rel_ptr(&boot_svsm_ca_page);
 }
 
 static __always_inline u64 svsm_get_caa_pa(void)
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 135e91a17d04..f3acbfcdca9a 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -519,7 +519,6 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 snp_get_unsupported_features(u64 status);
 u64 sev_get_status(void);
 void sev_show_status(void);
-void snp_update_svsm_ca(void);
 int prepare_pte_enc(struct pte_enc_desc *d);
 void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot);
 void snp_kexec_finish(void);
@@ -600,7 +599,6 @@ static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
 static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
 static inline u64 sev_get_status(void) { return 0; }
 static inline void sev_show_status(void) { }
-static inline void snp_update_svsm_ca(void) { }
 static inline int prepare_pte_enc(struct pte_enc_desc *d) { return 0; }
 static inline void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot) { }
 static inline void snp_kexec_finish(void) { }
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index faf3a13fb6ba..2f8c32173972 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -536,12 +536,6 @@ void __init sme_early_init(void)
 		x86_init.resources.dmi_setup = snp_dmi_setup;
 	}
 
-	/*
-	 * Switch the SVSM CA mapping (if active) from identity mapped to
-	 * kernel mapped.
-	 */
-	snp_update_svsm_ca();
-
 	if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
 		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
 }
--
2.50.0.727.gbf7dc18ff4-goog