From: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
To: linux-kernel@vger.kernel.org
Cc: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>,
Wei Liu <wei.liu@kernel.org>, Dexuan Cui <decui@microsoft.com>,
Tianyu Lan <Tianyu.Lan@microsoft.com>,
Michael Kelley <mikelley@microsoft.com>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
x86@kernel.org, linux-hyperv@vger.kernel.org,
Brijesh Singh <brijesh.singh@amd.com>,
Michael Roth <michael.roth@amd.com>,
Ashish Kalra <ashish.kalra@amd.com>,
Tom Lendacky <thomas.lendacky@amd.com>
Subject: [RFC PATCH v1 2/6] x86/sev: Add support for NestedVirtSnpMsr
Date: Mon, 23 Jan 2023 16:51:24 +0000 [thread overview]
Message-ID: <20230123165128.28185-3-jpiotrowski@linux.microsoft.com> (raw)
In-Reply-To: <20230123165128.28185-1-jpiotrowski@linux.microsoft.com>
The rmpupdate and psmash instructions, which are used in AMD's SEV-SNP
to update the RMP (Reverse Map) table, can't be trapped. For nested
scenarios, AMD defined MSR versions of these instructions which can be
emulated by the top-level hypervisor. One instance where these MSRs are
used is Hyper-V VMs, which expose SNP isolation features to the guest.
The MSRs are defined in "AMD64 Architecture Programmer’s Manual, Volume 2:
System Programming", section 15.36.19.
Signed-off-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
---
arch/x86/include/asm/cpufeatures.h | 1 +
arch/x86/include/asm/msr-index.h | 2 +
arch/x86/kernel/sev.c | 62 +++++++++++++++++++++++++-----
3 files changed, 55 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 480b4eaef310..e6e2e824f67b 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -423,6 +423,7 @@
#define X86_FEATURE_SEV_SNP (19*32+ 4) /* AMD Secure Encrypted Virtualization - Secure Nested Paging */
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
+#define X86_FEATURE_NESTED_VIRT_SNP_MSR (19*32+29) /* Virtualizable RMPUPDATE and PSMASH MSR available */
/*
* BUG word(s)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 35100c630617..d6103e607896 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -567,6 +567,8 @@
#define MSR_AMD64_SEV_SNP_ENABLED BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
#define MSR_AMD64_RMP_BASE 0xc0010132
#define MSR_AMD64_RMP_END 0xc0010133
+#define MSR_AMD64_VIRT_RMPUPDATE 0xc001f001
+#define MSR_AMD64_VIRT_PSMASH 0xc001f002
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 7fa39dc17edd..95404c7e5150 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -2566,6 +2566,24 @@ int snp_lookup_rmpentry(u64 pfn, int *level)
}
EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
+static bool virt_snp_msr(void)
+{
+ return boot_cpu_has(X86_FEATURE_NESTED_VIRT_SNP_MSR);
+}
+
+static u64 virt_psmash(u64 paddr)
+{
+ int ret;
+
+ asm volatile(
+ "wrmsr\n\t"
+ : "=a"(ret)
+ : "a"(paddr), "c"(MSR_AMD64_VIRT_PSMASH)
+ : "memory", "cc"
+ );
+ return ret;
+}
+
/*
* psmash is used to smash a 2MB aligned page into 4K
* pages while preserving the Validated bit in the RMP.
@@ -2581,11 +2599,15 @@ int psmash(u64 pfn)
if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
return -ENXIO;
- /* Binutils version 2.36 supports the PSMASH mnemonic. */
- asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
- : "=a"(ret)
- : "a"(paddr)
- : "memory", "cc");
+ if (virt_snp_msr()) {
+ ret = virt_psmash(paddr);
+ } else {
+ /* Binutils version 2.36 supports the PSMASH mnemonic. */
+ asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
+ : "=a"(ret)
+ : "a"(paddr)
+ : "memory", "cc");
+ }
return ret;
}
@@ -2601,6 +2623,21 @@ static int invalidate_direct_map(unsigned long pfn, int npages)
return set_memory_np((unsigned long)pfn_to_kaddr(pfn), npages);
}
+static u64 virt_rmpupdate(unsigned long paddr, struct rmp_state *val)
+{
+ int ret;
+ register u64 hi asm("r8") = ((u64 *)val)[1];
+ register u64 lo asm("rdx") = ((u64 *)val)[0];
+
+ asm volatile(
+ "wrmsr\n\t"
+ : "=a"(ret)
+ : "a"(paddr), "c"(MSR_AMD64_VIRT_RMPUPDATE), "r"(lo), "r"(hi)
+ : "memory", "cc"
+ );
+ return ret;
+}
+
static int rmpupdate(u64 pfn, struct rmp_state *val)
{
unsigned long paddr = pfn << PAGE_SHIFT;
@@ -2626,11 +2663,16 @@ static int rmpupdate(u64 pfn, struct rmp_state *val)
}
retry:
- /* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
- asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
- : "=a"(ret)
- : "a"(paddr), "c"((unsigned long)val)
- : "memory", "cc");
+
+ if (virt_snp_msr()) {
+ ret = virt_rmpupdate(paddr, val);
+ } else {
+ /* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
+ asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
+ : "=a"(ret)
+ : "a"(paddr), "c"((unsigned long)val)
+ : "memory", "cc");
+ }
if (ret) {
if (!retries) {
--
2.25.1
next prev parent reply other threads:[~2023-01-23 16:51 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-01-23 16:51 [RFC PATCH v1 0/6] Support nested SNP KVM guests on Hyper-V Jeremi Piotrowski
2023-01-23 16:51 ` [RFC PATCH v1 1/6] x86/hyperv: Allocate RMP table during boot Jeremi Piotrowski
2023-01-28 19:26 ` Michael Kelley (LINUX)
2023-01-30 15:03 ` Jeremi Piotrowski
2023-01-23 16:51 ` Jeremi Piotrowski [this message]
2023-01-28 19:48 ` [RFC PATCH v1 2/6] x86/sev: Add support for NestedVirtSnpMsr Michael Kelley (LINUX)
2023-01-30 15:25 ` Jeremi Piotrowski
2023-01-30 15:39 ` Michael Kelley (LINUX)
2023-01-23 16:51 ` [RFC PATCH v1 3/6] x86/sev: Maintain shadow rmptable on Hyper-V Jeremi Piotrowski
2023-01-29 4:37 ` Michael Kelley (LINUX)
2023-01-30 16:51 ` Jeremi Piotrowski
2023-01-23 16:51 ` [RFC PATCH v1 4/6] x86/amd: Configure necessary MSRs for SNP during CPU init when running as a guest Jeremi Piotrowski
2023-01-29 4:44 ` Michael Kelley (LINUX)
2023-01-30 17:25 ` Jeremi Piotrowski
2023-01-23 16:51 ` [RFC PATCH v1 5/6] iommu/amd: Don't fail snp_enable when running virtualized Jeremi Piotrowski
2023-01-23 16:51 ` [RFC PATCH v1 6/6] crypto: ccp - Introduce quirk to always reclaim pages after SEV-legacy commands Jeremi Piotrowski
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230123165128.28185-3-jpiotrowski@linux.microsoft.com \
--to=jpiotrowski@linux.microsoft.com \
--cc=Tianyu.Lan@microsoft.com \
--cc=ashish.kalra@amd.com \
--cc=bp@alien8.de \
--cc=brijesh.singh@amd.com \
--cc=dave.hansen@linux.intel.com \
--cc=decui@microsoft.com \
--cc=linux-hyperv@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=michael.roth@amd.com \
--cc=mikelley@microsoft.com \
--cc=mingo@redhat.com \
--cc=tglx@linutronix.de \
--cc=thomas.lendacky@amd.com \
--cc=wei.liu@kernel.org \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).