From: Sean Christopherson <seanjc@google.com>
To: Thomas Gleixner <tglx@kernel.org>, Ingo Molnar <mingo@redhat.com>,
Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
x86@kernel.org, Kiryl Shutsemau <kas@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Namhyung Kim <namhyung@kernel.org>,
Sean Christopherson <seanjc@google.com>,
Paolo Bonzini <pbonzini@redhat.com>
Cc: linux-kernel@vger.kernel.org, linux-coco@lists.linux.dev,
kvm@vger.kernel.org, linux-perf-users@vger.kernel.org,
Chao Gao <chao.gao@intel.com>,
Xu Yilun <yilun.xu@linux.intel.com>,
Dan Williams <dan.j.williams@intel.com>
Subject: [PATCH v3 07/16] KVM: SVM: Move core EFER.SVME enablement to kernel
Date: Fri, 13 Feb 2026 17:26:53 -0800 [thread overview]
Message-ID: <20260214012702.2368778-8-seanjc@google.com> (raw)
In-Reply-To: <20260214012702.2368778-1-seanjc@google.com>
Move the innermost EFER.SVME logic out of KVM and into core x86 to land
the SVM support alongside VMX support. This will allow providing a more
unified API from the kernel to KVM, and will allow moving the bulk of the
emergency disabling insanity out of KVM without having a weird split
between kernel and KVM for SVM vs. VMX.
No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
arch/x86/include/asm/virt.h | 6 +++++
arch/x86/kvm/svm/svm.c | 33 +++++------------------
arch/x86/virt/hw.c | 53 +++++++++++++++++++++++++++++++++++++
3 files changed, 65 insertions(+), 27 deletions(-)
diff --git a/arch/x86/include/asm/virt.h b/arch/x86/include/asm/virt.h
index cca0210a5c16..9a0753eaa20c 100644
--- a/arch/x86/include/asm/virt.h
+++ b/arch/x86/include/asm/virt.h
@@ -15,6 +15,12 @@ int x86_vmx_disable_virtualization_cpu(void);
void x86_vmx_emergency_disable_virtualization_cpu(void);
#endif
+#if IS_ENABLED(CONFIG_KVM_AMD)
+int x86_svm_enable_virtualization_cpu(void);
+int x86_svm_disable_virtualization_cpu(void);
+void x86_svm_emergency_disable_virtualization_cpu(void);
+#endif
+
#else
static __always_inline void x86_virt_init(void) {}
#endif
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0ae66c770ebc..5f033bf3ba83 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -478,27 +478,9 @@ static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm
return &sd->save_area->host_sev_es_save;
}
-static inline void kvm_cpu_svm_disable(void)
-{
- uint64_t efer;
-
- wrmsrq(MSR_VM_HSAVE_PA, 0);
- rdmsrq(MSR_EFER, efer);
- if (efer & EFER_SVME) {
- /*
- * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
- * NMI aren't blocked.
- */
- stgi();
- wrmsrq(MSR_EFER, efer & ~EFER_SVME);
- }
-}
-
static void svm_emergency_disable_virtualization_cpu(void)
{
- virt_rebooting = true;
-
- kvm_cpu_svm_disable();
+ wrmsrq(MSR_VM_HSAVE_PA, 0);
}
static void svm_disable_virtualization_cpu(void)
@@ -507,7 +489,7 @@ static void svm_disable_virtualization_cpu(void)
if (tsc_scaling)
__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
- kvm_cpu_svm_disable();
+ x86_svm_disable_virtualization_cpu();
amd_pmu_disable_virt();
}
@@ -516,12 +498,12 @@ static int svm_enable_virtualization_cpu(void)
{
struct svm_cpu_data *sd;
- uint64_t efer;
int me = raw_smp_processor_id();
+ int r;
- rdmsrq(MSR_EFER, efer);
- if (efer & EFER_SVME)
- return -EBUSY;
+ r = x86_svm_enable_virtualization_cpu();
+ if (r)
+ return r;
sd = per_cpu_ptr(&svm_data, me);
sd->asid_generation = 1;
@@ -529,8 +511,6 @@ static int svm_enable_virtualization_cpu(void)
sd->next_asid = sd->max_asid + 1;
sd->min_asid = max_sev_asid + 1;
- wrmsrq(MSR_EFER, efer | EFER_SVME);
-
wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa);
if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
@@ -541,7 +521,6 @@ static int svm_enable_virtualization_cpu(void)
__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
}
-
/*
* Get OSVW bits.
*
diff --git a/arch/x86/virt/hw.c b/arch/x86/virt/hw.c
index dc426c2bc24a..014e9dfab805 100644
--- a/arch/x86/virt/hw.c
+++ b/arch/x86/virt/hw.c
@@ -163,6 +163,59 @@ static __init int x86_vmx_init(void)
static __init int x86_vmx_init(void) { return -EOPNOTSUPP; }
#endif
+#if IS_ENABLED(CONFIG_KVM_AMD)
+int x86_svm_enable_virtualization_cpu(void)
+{
+ u64 efer;
+
+ if (!cpu_feature_enabled(X86_FEATURE_SVM))
+ return -EOPNOTSUPP;
+
+ rdmsrq(MSR_EFER, efer);
+ if (efer & EFER_SVME)
+ return -EBUSY;
+
+ wrmsrq(MSR_EFER, efer | EFER_SVME);
+ return 0;
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_enable_virtualization_cpu);
+
+int x86_svm_disable_virtualization_cpu(void)
+{
+ int r = -EIO;
+ u64 efer;
+
+ /*
+ * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
+ * NMI aren't blocked.
+ */
+ asm goto("1: stgi\n\t"
+ _ASM_EXTABLE(1b, %l[fault])
+ ::: "memory" : fault);
+ r = 0;
+
+fault:
+ rdmsrq(MSR_EFER, efer);
+ wrmsrq(MSR_EFER, efer & ~EFER_SVME);
+ return r;
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_disable_virtualization_cpu);
+
+void x86_svm_emergency_disable_virtualization_cpu(void)
+{
+ u64 efer;
+
+ virt_rebooting = true;
+
+ rdmsrq(MSR_EFER, efer);
+ if (!(efer & EFER_SVME))
+ return;
+
+ x86_svm_disable_virtualization_cpu();
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_emergency_disable_virtualization_cpu);
+#endif
+
void __init x86_virt_init(void)
{
x86_vmx_init();
--
2.53.0.310.g728cabbaf7-goog
next prev parent reply other threads:[~2026-02-14 1:27 UTC|newest]
Thread overview: 38+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-14 1:26 [PATCH v3 00/16] KVM: x86/tdx: Have TDX handle VMXON during bringup Sean Christopherson
2026-02-14 1:26 ` [PATCH v3 01/16] KVM: x86: Move kvm_rebooting to x86 Sean Christopherson
2026-02-14 1:26 ` [PATCH v3 02/16] KVM: VMX: Move architectural "vmcs" and "vmcs_hdr" structures to public vmx.h Sean Christopherson
2026-02-14 1:26 ` [PATCH v3 03/16] KVM: x86: Move "kvm_rebooting" to kernel as "virt_rebooting" Sean Christopherson
2026-02-14 1:26 ` [PATCH v3 04/16] KVM: VMX: Unconditionally allocate root VMCSes during boot CPU bringup Sean Christopherson
2026-02-14 1:26 ` [PATCH v3 05/16] x86/virt: Force-clear X86_FEATURE_VMX if configuring root VMCS fails Sean Christopherson
2026-02-16 20:53 ` dan.j.williams
2026-02-17 16:31 ` Sean Christopherson
2026-02-14 1:26 ` [PATCH v3 06/16] KVM: VMX: Move core VMXON enablement to kernel Sean Christopherson
2026-02-26 22:32 ` Dave Hansen
2026-02-14 1:26 ` Sean Christopherson [this message]
2026-02-26 7:40 ` [PATCH v3 07/16] KVM: SVM: Move core EFER.SVME " Chao Gao
2026-02-26 23:43 ` Sean Christopherson
2026-02-14 1:26 ` [PATCH v3 08/16] KVM: x86: Move bulk of emergency virtualizaton logic to virt subsystem Sean Christopherson
2026-02-26 8:55 ` Chao Gao
2026-02-14 1:26 ` [PATCH v3 09/16] x86/virt: Add refcounting of VMX/SVM usage to support multiple in-kernel users Sean Christopherson
2026-02-27 11:26 ` Chao Gao
2026-02-14 1:26 ` [PATCH v3 10/16] x86/virt/tdx: Drop the outdated requirement that TDX be enabled in IRQ context Sean Christopherson
2026-02-17 11:29 ` Huang, Kai
2026-02-17 15:25 ` Sean Christopherson
2026-02-17 20:30 ` Huang, Kai
2026-02-14 1:26 ` [PATCH v3 11/16] KVM: x86/tdx: Do VMXON and TDX-Module initialization during subsys init Sean Christopherson
2026-02-26 22:35 ` Dave Hansen
2026-02-27 11:28 ` Chao Gao
2026-02-14 1:26 ` [PATCH v3 12/16] x86/virt/tdx: Tag a pile of functions as __init, and globals as __ro_after_init Sean Christopherson
2026-02-14 1:26 ` [PATCH v3 13/16] x86/virt/tdx: KVM: Consolidate TDX CPU hotplug handling Sean Christopherson
2026-02-14 1:27 ` [PATCH v3 14/16] x86/virt/tdx: Use ida_is_empty() to detect if any TDs may be running Sean Christopherson
2026-02-14 1:27 ` [PATCH v3 15/16] KVM: Bury kvm_{en,dis}able_virtualization() in kvm_main.c once more Sean Christopherson
2026-02-14 1:27 ` [PATCH v3 16/16] KVM: TDX: Fold tdx_bringup() into tdx_hardware_setup() Sean Christopherson
2026-02-16 23:00 ` [PATCH v3 00/16] KVM: x86/tdx: Have TDX handle VMXON during bringup dan.j.williams
2026-02-25 14:38 ` Chao Gao
2026-03-03 21:39 ` Sagi Shahar
2026-03-04 0:06 ` Sagi Shahar
2026-03-05 17:08 ` Sean Christopherson
2026-03-05 18:50 ` dan.j.williams
2026-03-05 18:54 ` Dave Hansen
2026-03-05 19:07 ` Sean Christopherson
2026-03-05 19:08 ` Sean Christopherson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260214012702.2368778-8-seanjc@google.com \
--to=seanjc@google.com \
--cc=acme@kernel.org \
--cc=bp@alien8.de \
--cc=chao.gao@intel.com \
--cc=dan.j.williams@intel.com \
--cc=dave.hansen@linux.intel.com \
--cc=kas@kernel.org \
--cc=kvm@vger.kernel.org \
--cc=linux-coco@lists.linux.dev \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-perf-users@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=namhyung@kernel.org \
--cc=pbonzini@redhat.com \
--cc=peterz@infradead.org \
--cc=tglx@kernel.org \
--cc=x86@kernel.org \
--cc=yilun.xu@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox