From: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
To: linux-kernel@vger.kernel.org
Cc: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>,
Wei Liu <wei.liu@kernel.org>, Dexuan Cui <decui@microsoft.com>,
Tianyu Lan <Tianyu.Lan@microsoft.com>,
Michael Kelley <mikelley@microsoft.com>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
x86@kernel.org, linux-hyperv@vger.kernel.org,
Brijesh Singh <brijesh.singh@amd.com>,
Michael Roth <michael.roth@amd.com>,
Ashish Kalra <ashish.kalra@amd.com>,
Tom Lendacky <thomas.lendacky@amd.com>
Subject: [RFC PATCH v2 2/7] x86/sev: Add support for NestedVirtSnpMsr
Date: Mon, 13 Feb 2023 10:33:57 +0000 [thread overview]
Message-ID: <20230213103402.1189285-3-jpiotrowski@linux.microsoft.com> (raw)
In-Reply-To: <20230213103402.1189285-1-jpiotrowski@linux.microsoft.com>
The rmpupdate and psmash instructions, which are used in AMD's SEV-SNP
to update the RMP (Reverse Map) table, can't be trapped. For nested
scenarios, AMD defined MSR versions of these instructions which can
be trapped and must be emulated by the L0 hypervisor. One instance where
these MSRs are used is Hyper-V VMs, which expose SNP hardware isolation
capabilities to the L1 guest.
The MSRs are defined in "AMD64 Architecture Programmer’s Manual, Volume 2:
System Programming", section 15.36.19.
Signed-off-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
---
arch/x86/include/asm/cpufeatures.h | 1 +
arch/x86/include/asm/msr-index.h | 2 +
arch/x86/kernel/sev.c | 80 ++++++++++++++++++++++++++----
3 files changed, 73 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 480b4eaef310..e6e2e824f67b 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -423,6 +423,7 @@
#define X86_FEATURE_SEV_SNP (19*32+ 4) /* AMD Secure Encrypted Virtualization - Secure Nested Paging */
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
+#define X86_FEATURE_NESTED_VIRT_SNP_MSR (19*32+29) /* Virtualizable RMPUPDATE and PSMASH MSR available */
/*
* BUG word(s)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 35100c630617..d6103e607896 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -567,6 +567,8 @@
#define MSR_AMD64_SEV_SNP_ENABLED BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
#define MSR_AMD64_RMP_BASE 0xc0010132
#define MSR_AMD64_RMP_END 0xc0010133
+#define MSR_AMD64_VIRT_RMPUPDATE 0xc001f001
+#define MSR_AMD64_VIRT_PSMASH 0xc001f002
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 7fa39dc17edd..ad09dd3747a1 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -2566,6 +2566,32 @@ int snp_lookup_rmpentry(u64 pfn, int *level)
}
EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
+static bool virt_snp_msr(void)
+{
+ return boot_cpu_has(X86_FEATURE_NESTED_VIRT_SNP_MSR);
+}
+
+/*
+ * This version of psmash is not implemented in hardware but always
+ * traps to L0 hypervisor. It doesn't follow usual wrmsr conventions.
+ * Inputs:
+ * rax: 2MB aligned GPA
+ * Outputs:
+ * rax: psmash return code
+ */
+static u64 virt_psmash(u64 paddr)
+{
+	u64 ret;
+
+	asm volatile(
+		"wrmsr\n\t"
+		: "=a"(ret)
+		: "a"(paddr), "c"(MSR_AMD64_VIRT_PSMASH)
+		: "memory", "cc"
+	);
+	return ret;
+}
+
/*
* psmash is used to smash a 2MB aligned page into 4K
* pages while preserving the Validated bit in the RMP.
@@ -2581,11 +2607,15 @@ int psmash(u64 pfn)
if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
return -ENXIO;
- /* Binutils version 2.36 supports the PSMASH mnemonic. */
- asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
- : "=a"(ret)
- : "a"(paddr)
- : "memory", "cc");
+ if (virt_snp_msr()) {
+ ret = virt_psmash(paddr);
+ } else {
+ /* Binutils version 2.36 supports the PSMASH mnemonic. */
+ asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
+ : "=a"(ret)
+ : "a"(paddr)
+ : "memory", "cc");
+ }
return ret;
}
@@ -2601,6 +2631,31 @@ static int invalidate_direct_map(unsigned long pfn, int npages)
return set_memory_np((unsigned long)pfn_to_kaddr(pfn), npages);
}
+/*
+ * This version of rmpupdate is not implemented in hardware but always
+ * traps to L0 hypervisor. It doesn't follow usual wrmsr conventions.
+ * Inputs:
+ * rax: 4KB aligned GPA
+ * rdx: bytes 7:0 of new rmp entry
+ * r8: bytes 15:8 of new rmp entry
+ * Outputs:
+ * rax: rmpupdate return code
+ */
+static u64 virt_rmpupdate(unsigned long paddr, struct rmp_state *val)
+{
+	u64 ret;
+	register u64 hi asm("r8") = ((u64 *)val)[1];
+	register u64 lo asm("rdx") = ((u64 *)val)[0];
+
+	asm volatile(
+		"wrmsr\n\t"
+		: "=a"(ret)
+		: "a"(paddr), "c"(MSR_AMD64_VIRT_RMPUPDATE), "r"(lo), "r"(hi)
+		: "memory", "cc"
+	);
+	return ret;
+}
+
static int rmpupdate(u64 pfn, struct rmp_state *val)
{
unsigned long paddr = pfn << PAGE_SHIFT;
@@ -2626,11 +2681,16 @@ static int rmpupdate(u64 pfn, struct rmp_state *val)
}
retry:
- /* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
- asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
- : "=a"(ret)
- : "a"(paddr), "c"((unsigned long)val)
- : "memory", "cc");
+
+ if (virt_snp_msr()) {
+ ret = virt_rmpupdate(paddr, val);
+ } else {
+ /* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
+ asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
+ : "=a"(ret)
+ : "a"(paddr), "c"((unsigned long)val)
+ : "memory", "cc");
+ }
if (ret) {
if (!retries) {
--
2.25.1
next prev parent reply other threads:[~2023-02-13 10:34 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-13 10:33 [RFC PATCH v2 0/7] Support nested SNP KVM guests on Hyper-V Jeremi Piotrowski
2023-02-13 10:33 ` [RFC PATCH v2 1/7] x86/hyperv: Allocate RMP table during boot Jeremi Piotrowski
2023-02-13 10:33 ` Jeremi Piotrowski [this message]
2023-02-13 10:33 ` [RFC PATCH v2 3/7] x86/sev: Maintain shadow rmptable on Hyper-V Jeremi Piotrowski
2023-02-13 10:33 ` [RFC PATCH v2 4/7] x86/amd: Configure necessary MSRs for SNP during CPU init when running as a guest Jeremi Piotrowski
2023-02-13 10:34 ` [RFC PATCH v2 5/7] iommu/amd: Don't fail snp_enable when running virtualized Jeremi Piotrowski
2023-02-13 10:34 ` [RFC PATCH v2 6/7] crypto: ccp - Introduce quirk to always reclaim pages after SEV-legacy commands Jeremi Piotrowski
2023-02-13 10:34 ` [RFC PATCH v2 7/7] x86/fault: Handle RMP faults with 0 address when nested Jeremi Piotrowski
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230213103402.1189285-3-jpiotrowski@linux.microsoft.com \
--to=jpiotrowski@linux.microsoft.com \
--cc=Tianyu.Lan@microsoft.com \
--cc=ashish.kalra@amd.com \
--cc=bp@alien8.de \
--cc=brijesh.singh@amd.com \
--cc=dave.hansen@linux.intel.com \
--cc=decui@microsoft.com \
--cc=linux-hyperv@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=michael.roth@amd.com \
--cc=mikelley@microsoft.com \
--cc=mingo@redhat.com \
--cc=tglx@linutronix.de \
--cc=thomas.lendacky@amd.com \
--cc=wei.liu@kernel.org \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox