From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mail.linutronix.de (146.0.238.70:993) by crypto-ml.lab.linutronix.de with IMAP4-SSL for ; 24 Feb 2019 15:11:55 -0000 Received: from mga02.intel.com ([134.134.136.20]) by Galois.linutronix.de with esmtps (TLS1.2:DHE_RSA_AES_256_CBC_SHA256:256) (Exim 4.80) (envelope-from ) id 1gxvNx-0001Qu-6v for speck@linutronix.de; Sun, 24 Feb 2019 16:08:09 +0100 From: Andi Kleen Subject: [MODERATED] [PATCH v6 09/43] MDSv6 Date: Sun, 24 Feb 2019 07:07:15 -0800 Message-Id: <01242a891f23111fa2ef4831ff49dddb38bc0636.1551019522.git.ak@linux.intel.com> In-Reply-To: References: In-Reply-To: References: To: speck@linutronix.de Cc: Andi Kleen List-ID: From: Andi Kleen Subject: x86/speculation/mds: Handle VMENTRY clear for CPUs without l1tf Some Atom CPUs don't have L1TF, but have (parts of) MDS. Normally we rely on the L1TF L1D clear on vm entry to avoid leaking data to the guest. But these Atom CPUs don't support the L1D clear MSR. Add special code to trigger VERW explicitly on KVM guest entry. We use similar logic as the conditional l1d flush, which is the default and currently cannot be overridden. 
Signed-off-by: Andi Kleen --- arch/x86/include/asm/cpufeatures.h | 4 ++++ arch/x86/kernel/cpu/bugs.c | 3 +++ arch/x86/kernel/cpu/common.c | 14 +++++++++++++- arch/x86/kvm/vmx/vmx.c | 20 +++++++++++++++----- 4 files changed, 35 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 9e934626ea88..80ab5d026157 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -353,6 +353,9 @@ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ +/* Linux defined features, word 19 */ +#define X86_FEATURE_MDS_VMENTRY_FLUSH (19*32+0) /* MDS needs extra flush for vmentry */ + /* * BUG word(s) */ @@ -384,5 +387,6 @@ #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */ +#define X86_BUG_MDS_NO_L1TF X86_BUG(20) /* MDS but no L1TF L1D flush */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 4114b4f94c1d..26b890f18239 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1087,6 +1087,9 @@ static void mds_select_mitigation(void) if (cmdline_find_option_bool(boot_command_line, "mds=full") || cmdline_find_option_bool(boot_command_line, "mds=auto")) setup_force_cpu_cap(X86_FEATURE_VERW); + + if (boot_cpu_has(X86_FEATURE_VERW) && boot_cpu_has_bug(X86_BUG_MDS_NO_L1TF)) + setup_force_cpu_cap(X86_FEATURE_MDS_VMENTRY_FLUSH); } #ifdef CONFIG_SYSFS diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index bac5a3a38f0d..94d0eeb0a94b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1006,6 +1006,15 @@ static const __initconst 
struct x86_cpu_id cpu_no_mds[] = { {} }; +/* CPUs with MDS, but not L1TF, that don't have L1D flush */ +static const __initconst struct x86_cpu_id cpu_mds_no_l1d[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X }, + {} +}; + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) { u64 ia32_cap = 0; @@ -1029,8 +1038,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && !x86_match_cpu(cpu_no_mds)) && - !(ia32_cap & ARCH_CAP_MDS_NO)) + !(ia32_cap & ARCH_CAP_MDS_NO)) { setup_force_cpu_bug(X86_BUG_MDS); + if (x86_match_cpu(cpu_mds_no_l1d)) + setup_force_cpu_bug(X86_BUG_MDS_NO_L1TF); + } if (x86_match_cpu(cpu_no_meltdown)) return; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 30a6bcd735ec..fb2be25a4aa6 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5866,27 +5866,36 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) static void vmx_l1d_flush(struct kvm_vcpu *vcpu) { int size = PAGE_SIZE << L1D_CACHE_ORDER; + bool flush_l1d = vcpu->arch.l1tf_flush_l1d; + + flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); + + /* CPUs with MDS_VMENTRY_FLUSH never use the L1D flush below. */ + if (static_cpu_has(X86_FEATURE_MDS_VMENTRY_FLUSH)) { + vcpu->arch.l1tf_flush_l1d = false; + kvm_clear_cpu_l1tf_flush_l1d(); + if (!flush_l1d && !static_key_enabled(&force_cpu_clear)) + return; + clear_cpu(); + return; + } /* * This code is only executed when the the flush mode is 'cond' or * 'always' */ if (static_branch_likely(&vmx_l1d_flush_cond)) { - bool flush_l1d; - /* * Clear the per-vcpu flush bit, it gets set again * either from vcpu_run() or from one of the unsafe * VMEXIT handlers. 
*/ - flush_l1d = vcpu->arch.l1tf_flush_l1d; vcpu->arch.l1tf_flush_l1d = false; /* * Clear the per-cpu flush bit, it gets set again from * the interrupt handlers. */ - flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); kvm_clear_cpu_l1tf_flush_l1d(); if (!flush_l1d) @@ -6369,7 +6378,8 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) evmcs_rsp = static_branch_unlikely(&enable_evmcs) ? (unsigned long)¤t_evmcs->host_rsp : 0; - if (static_branch_unlikely(&vmx_l1d_should_flush)) + if (static_branch_unlikely(&vmx_l1d_should_flush) || + static_cpu_has(X86_FEATURE_MDS_VMENTRY_FLUSH)) vmx_l1d_flush(vcpu); asm( -- 2.17.2