From: Binbin Wu <binbin.wu@linux.intel.com>
To: kvm@vger.kernel.org
Cc: pbonzini@redhat.com, seanjc@google.com,
rick.p.edgecombe@intel.com, xiaoyao.li@intel.com,
chao.gao@intel.com, kai.huang@intel.com,
binbin.wu@linux.intel.com
Subject: [RFC PATCH 10/27] KVM: x86: Use vendor-specific overlay flags instead of F_CPUID_DEFAULT
Date: Fri, 17 Apr 2026 15:35:53 +0800 [thread overview]
Message-ID: <20260417073610.3246316-11-binbin.wu@linux.intel.com> (raw)
In-Reply-To: <20260417073610.3246316-1-binbin.wu@linux.intel.com>
Use F_CPUID_VMX or F_CPUID_SVM instead of F_CPUID_DEFAULT when a feature
is vendor specific and the underlying hardware capability is not
checked. Also, use respective flags in vendor modules.
A feature initialized via F() and its variants in kvm_cpu_cap_init()
checks the host CPU capability or the raw CPUID, so the feature can
only be set in the related overlay when it is supported by the
underlying hardware. Using F_CPUID_VMX or F_CPUID_SVM when a feature is
vendor specific makes the code more readable; however, it could
introduce regressions if a common feature is set only for one vendor.
For simplicity, just keep using F_CPUID_DEFAULT when the underlying
hardware capability will be checked.
Features initialized via kvm_cpu_cap_set() or EMULATED_F() don't check
the host CPU capability or the raw CPUID; use F_CPUID_VMX or
F_CPUID_SVM respectively if the feature is vendor specific.
In vendor modules, the respective vendor flags are used.
There are a few exceptions, i.e., IBT, BUS_LOCK_DETECT, and
MSR_IMM. They are common features for both vendors, but not supported
by SVM yet. Use F_CPUID_VMX instead of F_CPUID_DEFAULT for them.
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
---
arch/x86/kvm/cpuid.c | 30 +++++++++++++++---------------
arch/x86/kvm/svm/sev.c | 6 +++---
arch/x86/kvm/svm/svm.c | 38 +++++++++++++++++++-------------------
arch/x86/kvm/vmx/vmx.c | 36 ++++++++++++++++++------------------
4 files changed, 55 insertions(+), 55 deletions(-)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 2c4e64aa14c4..71959f4918e7 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -983,7 +983,7 @@ void kvm_initialize_cpu_caps(void)
/* MPX_MAWAU */
F(RDPID, F_CPUID_DEFAULT | F_CPUID_TDX),
/* KEY_LOCKER */
- F(BUS_LOCK_DETECT, F_CPUID_DEFAULT | F_CPUID_TDX),
+ F(BUS_LOCK_DETECT, F_CPUID_VMX | F_CPUID_TDX),
F(CLDEMOTE, F_CPUID_DEFAULT | F_CPUID_TDX),
/* Reserved */
F(MOVDIRI, F_CPUID_DEFAULT | F_CPUID_TDX),
@@ -1022,7 +1022,7 @@ void kvm_initialize_cpu_caps(void)
/* HYBRID_CPU */
F(TSXLDTRK, F_CPUID_DEFAULT | F_CPUID_TDX),
/* Reserved, PCONFIG, ARCH_LBR */
- F(IBT, F_CPUID_DEFAULT | F_CPUID_TDX),
+ F(IBT, F_CPUID_VMX | F_CPUID_TDX),
/* Reserved */
F(AMX_BF16, F_CPUID_DEFAULT | F_CPUID_TDX),
F(AVX512_FP16, F_CPUID_DEFAULT | F_CPUID_TDX),
@@ -1049,11 +1049,11 @@ void kvm_initialize_cpu_caps(void)
if (boot_cpu_has(X86_FEATURE_AMD_IBPB_RET) &&
boot_cpu_has(X86_FEATURE_AMD_IBPB) &&
boot_cpu_has(X86_FEATURE_AMD_IBRS))
- kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL, F_CPUID_SVM);
if (boot_cpu_has(X86_FEATURE_STIBP))
- kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP, F_CPUID_VMX);
if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
- kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD, F_CPUID_SVM);
kvm_cpu_cap_init(CPUID_7_1_EAX,
F(SHA512, F_CPUID_DEFAULT | F_CPUID_TDX),
@@ -1075,7 +1075,7 @@ void kvm_initialize_cpu_caps(void)
kvm_cpu_cap_init(CPUID_7_1_ECX,
/* MSR_IMM is reserved in TDX spec */
- SCATTERED_F(MSR_IMM, F_CPUID_DEFAULT),
+ SCATTERED_F(MSR_IMM, F_CPUID_VMX),
);
kvm_cpu_cap_init(CPUID_7_1_EDX,
@@ -1217,26 +1217,26 @@ void kvm_initialize_cpu_caps(void)
* record that in cpufeatures so use them.
*/
if (boot_cpu_has(X86_FEATURE_IBPB)) {
- kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB, F_CPUID_SVM);
if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
!boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB))
- kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB_RET, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB_RET, F_CPUID_SVM);
}
if (boot_cpu_has(X86_FEATURE_IBRS))
- kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS, F_CPUID_SVM);
if (boot_cpu_has(X86_FEATURE_STIBP))
- kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP, F_CPUID_SVM);
if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
- kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD, F_CPUID_SVM);
if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
- kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO, F_CPUID_SVM);
/*
* The preference is to use SPEC CTRL MSR instead of the
* VIRT_SPEC MSR.
*/
if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
!boot_cpu_has(X86_FEATURE_AMD_SSBD))
- kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD, F_CPUID_SVM);
/* All SVM features required additional vendor module enabling. */
kvm_cpu_cap_init(CPUID_8000_000A_EDX,
@@ -1282,7 +1282,7 @@ void kvm_initialize_cpu_caps(void)
F(NULL_SEL_CLR_BASE, F_CPUID_DEFAULT),
/* UpperAddressIgnore */
F(AUTOIBRS, F_CPUID_DEFAULT),
- EMULATED_F(NO_SMM_CTL_MSR, F_CPUID_DEFAULT),
+ EMULATED_F(NO_SMM_CTL_MSR, F_CPUID_SVM),
/* PrefetchCtlMsr */
/* GpOnUserCpuid */
/* EPSF */
@@ -1305,7 +1305,7 @@ void kvm_initialize_cpu_caps(void)
);
if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
- kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE, F_CPUID_SVM);
kvm_cpu_cap_init(CPUID_C000_0001_EDX,
F(XSTORE, F_CPUID_DEFAULT),
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 6ec9c806e1fb..4b10d63a095a 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3014,15 +3014,15 @@ void sev_vm_destroy(struct kvm *kvm)
void __init sev_set_cpu_caps(void)
{
if (sev_enabled) {
- kvm_cpu_cap_set(X86_FEATURE_SEV, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_SEV, F_CPUID_SVM);
kvm_caps.supported_vm_types |= BIT(KVM_X86_SEV_VM);
}
if (sev_es_enabled) {
- kvm_cpu_cap_set(X86_FEATURE_SEV_ES, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_SEV_ES, F_CPUID_SVM);
kvm_caps.supported_vm_types |= BIT(KVM_X86_SEV_ES_VM);
}
if (sev_snp_enabled) {
- kvm_cpu_cap_set(X86_FEATURE_SEV_SNP, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_SEV_SNP, F_CPUID_SVM);
kvm_caps.supported_vm_types |= BIT(KVM_X86_SNP_VM);
}
}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 2b4a17536580..a21c500e1a91 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5445,48 +5445,48 @@ static __init void svm_set_cpu_caps(void)
kvm_caps.supported_perf_cap = 0;
- kvm_cpu_cap_clear(X86_FEATURE_IBT, F_CPUID_DEFAULT);
+ kvm_cpu_cap_clear(X86_FEATURE_IBT, F_CPUID_SVM);
/* CPUID 0x80000001 and 0x8000000A (SVM features) */
if (nested) {
- kvm_cpu_cap_set(X86_FEATURE_SVM, F_CPUID_DEFAULT);
- kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_SVM, F_CPUID_SVM);
+ kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN, F_CPUID_SVM);
/*
* KVM currently flushes TLBs on *every* nested SVM transition,
* and so for all intents and purposes KVM supports flushing by
* ASID, i.e. KVM is guaranteed to honor every L1 ASID flush.
*/
- kvm_cpu_cap_set(X86_FEATURE_FLUSHBYASID, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_FLUSHBYASID, F_CPUID_SVM);
if (nrips)
- kvm_cpu_cap_set(X86_FEATURE_NRIPS, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_NRIPS, F_CPUID_SVM);
if (npt_enabled)
- kvm_cpu_cap_set(X86_FEATURE_NPT, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_NPT, F_CPUID_SVM);
if (tsc_scaling)
- kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR, F_CPUID_SVM);
if (vls)
- kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD, F_CPUID_SVM);
if (lbrv)
- kvm_cpu_cap_set(X86_FEATURE_LBRV, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_LBRV, F_CPUID_SVM);
if (boot_cpu_has(X86_FEATURE_PAUSEFILTER))
- kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER, F_CPUID_SVM);
if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
- kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD, F_CPUID_SVM);
if (vgif)
- kvm_cpu_cap_set(X86_FEATURE_VGIF, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_VGIF, F_CPUID_SVM);
if (vnmi)
- kvm_cpu_cap_set(X86_FEATURE_VNMI, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_VNMI, F_CPUID_SVM);
/* Nested VM can receive #VMEXIT instead of triggering #GP */
- kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK, F_CPUID_SVM);
}
if (cpu_feature_enabled(X86_FEATURE_BUS_LOCK_THRESHOLD))
@@ -5495,7 +5495,7 @@ static __init void svm_set_cpu_caps(void)
/* CPUID 0x80000008 */
if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
boot_cpu_has(X86_FEATURE_AMD_SSBD))
- kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD, F_CPUID_SVM);
if (enable_pmu) {
/*
@@ -5507,11 +5507,11 @@ static __init void svm_set_cpu_caps(void)
kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
kvm_pmu_cap.num_counters_gp);
else
- kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE, F_CPUID_DEFAULT);
+ kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE, F_CPUID_SVM);
if (kvm_pmu_cap.version != 2 ||
!kvm_cpu_cap_has(NULL, X86_FEATURE_PERFCTR_CORE))
- kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2, F_CPUID_DEFAULT);
+ kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2, F_CPUID_SVM);
}
/* CPUID 0x8000001F (SME/SEV features) */
@@ -5521,8 +5521,8 @@ static __init void svm_set_cpu_caps(void)
* Clear capabilities that are automatically configured by common code,
* but that require explicit SVM support (that isn't yet implemented).
*/
- kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT, F_CPUID_DEFAULT);
- kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM, F_CPUID_DEFAULT);
+ kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT, F_CPUID_SVM);
+ kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM, F_CPUID_SVM);
kvm_setup_xss_caps();
kvm_finalize_cpu_caps();
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d6d32f3d162b..f772558758f7 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8086,55 +8086,55 @@ static __init void vmx_set_cpu_caps(void)
/* CPUID 0x1 */
if (nested)
- kvm_cpu_cap_set(X86_FEATURE_VMX, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_VMX, F_CPUID_VMX);
/* CPUID 0x7 */
/* MPX is fixed-0 for TDX */
if (kvm_mpx_supported())
- kvm_cpu_cap_check_and_set(X86_FEATURE_MPX, F_CPUID_DEFAULT);
+ kvm_cpu_cap_check_and_set(X86_FEATURE_MPX, F_CPUID_VMX);
/* INVPCID is fixed-1 for TDX */
if (!cpu_has_vmx_invpcid())
- kvm_cpu_cap_clear(X86_FEATURE_INVPCID, F_CPUID_DEFAULT);
+ kvm_cpu_cap_clear(X86_FEATURE_INVPCID, F_CPUID_VMX);
/* KVM doesn't support PT for TDX yet */
if (vmx_pt_mode_is_host_guest())
- kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT, F_CPUID_DEFAULT);
+ kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT, F_CPUID_VMX);
/* DS and DTES64 are fixed-1 for TDX */
- enable_mask = vmx_pebs_supported() ? F_CPUID_TDX | F_CPUID_DEFAULT : F_CPUID_TDX;
+ enable_mask = vmx_pebs_supported() ? F_CPUID_TDX | F_CPUID_VMX : F_CPUID_TDX;
kvm_cpu_cap_check_and_set(X86_FEATURE_DS, enable_mask);
kvm_cpu_cap_check_and_set(X86_FEATURE_DTES64, enable_mask);
/* PDCM is fixed-1 for TDX */
if (!enable_pmu)
- kvm_cpu_cap_clear(X86_FEATURE_PDCM, F_CPUID_DEFAULT);
+ kvm_cpu_cap_clear(X86_FEATURE_PDCM, F_CPUID_VMX);
kvm_caps.supported_perf_cap = vmx_get_perf_capabilities();
/* SGX related features are fixed-0 for TDX */
if (!enable_sgx) {
- kvm_cpu_cap_clear(X86_FEATURE_SGX, F_CPUID_DEFAULT);
- kvm_cpu_cap_clear(X86_FEATURE_SGX_LC, F_CPUID_DEFAULT);
- kvm_cpu_cap_clear(X86_FEATURE_SGX1, F_CPUID_DEFAULT);
- kvm_cpu_cap_clear(X86_FEATURE_SGX2, F_CPUID_DEFAULT);
- kvm_cpu_cap_clear(X86_FEATURE_SGX_EDECCSSA, F_CPUID_DEFAULT);
+ kvm_cpu_cap_clear(X86_FEATURE_SGX, F_CPUID_VMX);
+ kvm_cpu_cap_clear(X86_FEATURE_SGX_LC, F_CPUID_VMX);
+ kvm_cpu_cap_clear(X86_FEATURE_SGX1, F_CPUID_VMX);
+ kvm_cpu_cap_clear(X86_FEATURE_SGX2, F_CPUID_VMX);
+ kvm_cpu_cap_clear(X86_FEATURE_SGX_EDECCSSA, F_CPUID_VMX);
}
if (vmx_umip_emulated())
- kvm_cpu_cap_set(X86_FEATURE_UMIP, F_CPUID_DEFAULT);
+ kvm_cpu_cap_set(X86_FEATURE_UMIP, F_CPUID_VMX);
/* CPUID 0xD.1 */
/* XSAVES is fixed-1 for TDX */
if (!cpu_has_vmx_xsaves())
- kvm_cpu_cap_clear(X86_FEATURE_XSAVES, F_CPUID_DEFAULT);
+ kvm_cpu_cap_clear(X86_FEATURE_XSAVES, F_CPUID_VMX);
/* CPUID 0x80000001 and 0x7 (RDPID) */
if (!cpu_has_vmx_rdtscp()) {
- kvm_cpu_cap_clear(X86_FEATURE_RDTSCP, F_CPUID_DEFAULT);
- kvm_cpu_cap_clear(X86_FEATURE_RDPID, F_CPUID_DEFAULT);
+ kvm_cpu_cap_clear(X86_FEATURE_RDTSCP, F_CPUID_VMX);
+ kvm_cpu_cap_clear(X86_FEATURE_RDPID, F_CPUID_VMX);
}
/* KVM doesn't support WAITPKG for TDX yet */
if (cpu_has_vmx_waitpkg())
- kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG, F_CPUID_DEFAULT);
+ kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG, F_CPUID_VMX);
/*
* Disable CET if unrestricted_guest is unsupported as KVM doesn't
@@ -8144,8 +8144,8 @@ static __init void vmx_set_cpu_caps(void)
*/
if (!cpu_has_load_cet_ctrl() || !enable_unrestricted_guest ||
!cpu_has_vmx_basic_no_hw_errcode_cc()) {
- kvm_cpu_cap_clear(X86_FEATURE_SHSTK, F_CPUID_DEFAULT | F_CPUID_TDX);
- kvm_cpu_cap_clear(X86_FEATURE_IBT, F_CPUID_DEFAULT | F_CPUID_TDX);
+ kvm_cpu_cap_clear(X86_FEATURE_SHSTK, F_CPUID_VMX | F_CPUID_TDX);
+ kvm_cpu_cap_clear(X86_FEATURE_IBT, F_CPUID_VMX | F_CPUID_TDX);
}
kvm_setup_xss_caps();
--
2.46.0
next prev parent reply other threads:[~2026-04-17 7:32 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-17 7:35 [RFC PATCH 00/27] KVM: x86: Add a paranoid mode for CPUID verification Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 01/27] KVM: x86: Fix emulated CPUID features being applied to wrong sub-leaf Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 02/27] KVM: x86: Reorder the features for CPUID 7 Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 03/27] KVM: x86: Add definitions for CPUID overlays Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 04/27] KVM: x86: Extend F() and its variants " Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 05/27] KVM: x86: Extend kvm_cpu_cap_{set/clear}() to configure overlays Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 06/27] KVM: x86: Populate TDX CPUID overlay with supported feature bits Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 07/27] KVM: x86: Support KVM_GET_{SUPPORTED,EMULATED}_CPUID as VM scope ioctls Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 08/27] KVM: x86: Thread @kvm to KVM CPU capability helpers Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 09/27] KVM: x86: Use overlays of KVM CPU capabilities Binbin Wu
2026-04-17 7:35 ` Binbin Wu [this message]
2026-04-17 7:35 ` [RFC PATCH 11/27] KVM: SVM: Drop unnecessary clears of unsupported common x86 features Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 12/27] KVM: x86: Split KVM CPU cap leafs into two parts Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 13/27] KVM: x86: Add a helper to initialize CPUID multi-bit fields Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 14/27] KVM: x86: Add a helper to init multiple feature bits based on raw CPUID Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 15/27] KVM: x86: Add infrastructure to track CPUID entries ignored in paranoid mode Binbin Wu
2026-04-17 7:35 ` [RFC PATCH 16/27] KVM: x86: Init allowed masks for basic CPUID range " Binbin Wu
2026-04-17 7:36 ` [RFC PATCH 17/27] KVM: x86: Init allowed masks for extended " Binbin Wu
2026-04-17 7:36 ` [RFC PATCH 18/27] KVM: x86: Handle Centaur CPUID leafs " Binbin Wu
2026-04-17 7:36 ` [RFC PATCH 19/27] KVM: x86: Track KVM PV CPUID features for " Binbin Wu
2026-04-17 7:36 ` [RFC PATCH 20/27] KVM: x86: Add per-VM flag to track CPUID " Binbin Wu
2026-04-17 7:36 ` [RFC PATCH 21/27] KVM: x86: Make kvm_vcpu_after_set_cpuid() return an error code Binbin Wu
2026-04-17 7:36 ` [RFC PATCH 22/27] KVM: x86: Verify userspace CPUID inputs in paranoid mode Binbin Wu
2026-04-17 7:36 ` [RFC PATCH 23/27] KVM: x86: Account for runtime CPUID features " Binbin Wu
2026-04-17 7:36 ` [RFC PATCH 24/27] KVM: x86: Skip paranoid CPUID check for KVM PV leafs when base is relocated Binbin Wu
2026-04-17 7:36 ` [RFC PATCH 25/27] KVM: x86: Add new KVM_CAP_X86_CPUID_PARANOID Binbin Wu
2026-04-17 7:36 ` [RFC PATCH 26/27] KVM: x86: Add a helper to query the allowed CPUID mask Binbin Wu
2026-04-17 7:36 ` [RFC PATCH 27/27] KVM: TDX: Replace hardcoded CPUID filtering with the allowed mask Binbin Wu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260417073610.3246316-11-binbin.wu@linux.intel.com \
--to=binbin.wu@linux.intel.com \
--cc=chao.gao@intel.com \
--cc=kai.huang@intel.com \
--cc=kvm@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=rick.p.edgecombe@intel.com \
--cc=seanjc@google.com \
--cc=xiaoyao.li@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox