From: Sheng Yang <sheng@linux.intel.com>
To: Avi Kivity <avi@redhat.com>
Cc: Jan Kiszka <jan.kiszka@siemens.com>,
Marcelo Tosatti <mtosatti@redhat.com>,
Joerg Roedel <joerg.roedel@amd.com>,
kvm@vger.kernel.org, Sheng Yang <sheng@linux.intel.com>,
"Yaozu (Eddie) Dong" <eddie.dong@intel.com>
Subject: [PATCH v3] KVM: VMX: Execute WBINVD to keep data consistency with assigned devices
Date: Mon, 28 Jun 2010 11:36:27 +0800 [thread overview]
Message-ID: <1277696187-3571-1-git-send-email-sheng@linux.intel.com> (raw)
Some guest device drivers may leverage "Non-Snoop" I/O, and explicitly execute
WBINVD or CLFLUSH on a RAM region. Since migration may occur before the WBINVD or
CLFLUSH, we need to maintain data consistency either by:
1: flushing cache (wbinvd) when the guest is scheduled out if there is no
wbinvd exit, or
2: executing wbinvd on all dirty physical CPUs when a guest wbinvd exit occurs.
Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@intel.com>
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
OK, I've checked AMD's spec, they do intercept WBINVD. So we can do it like
this.
arch/x86/include/asm/kvm_host.h | 6 ++++++
arch/x86/kvm/emulate.c | 5 ++++-
arch/x86/kvm/svm.c | 7 +++++++
arch/x86/kvm/vmx.c | 10 +++++++++-
arch/x86/kvm/x86.c | 29 +++++++++++++++++++++++++++++
5 files changed, 55 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a57cdea..b385d6f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
+#include <linux/cpumask.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
@@ -358,6 +359,8 @@ struct kvm_vcpu_arch {
/* fields used by HYPER-V emulation */
u64 hv_vapic;
+
+ cpumask_t wbinvd_dirty_mask;
};
struct kvm_arch {
@@ -514,6 +517,8 @@ struct kvm_x86_ops {
void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
+ bool (*has_wbinvd_exit)(void);
+
const struct trace_print_flags *exit_reasons_str;
};
@@ -571,6 +576,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
+int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index abb8cec..e8bdddc 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3138,8 +3138,11 @@ twobyte_insn:
emulate_clts(ctxt->vcpu);
c->dst.type = OP_NONE;
break;
- case 0x08: /* invd */
case 0x09: /* wbinvd */
+ kvm_emulate_wbinvd(ctxt->vcpu);
+ c->dst.type = OP_NONE;
+ break;
+ case 0x08: /* invd */
case 0x0d: /* GrpP (prefetch) */
case 0x18: /* Grp16 (prefetch/nop) */
c->dst.type = OP_NONE;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 587b99d..56c9b6b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3424,6 +3424,11 @@ static bool svm_rdtscp_supported(void)
return false;
}
+static bool svm_has_wbinvd_exit(void)
+{
+ return true;
+}
+
static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3508,6 +3513,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.rdtscp_supported = svm_rdtscp_supported,
.set_supported_cpuid = svm_set_supported_cpuid,
+
+ .has_wbinvd_exit = svm_has_wbinvd_exit,
};
static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e565689..806ab12 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -412,6 +412,12 @@ static inline bool cpu_has_virtual_nmis(void)
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}
+static inline bool cpu_has_vmx_wbinvd_exit(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_WBINVD_EXITING;
+}
+
static inline bool report_flexpriority(void)
{
return flexpriority_enabled;
@@ -3400,7 +3406,7 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
- /* TODO: Add support for VT-d/pass-through device */
+ kvm_emulate_wbinvd(vcpu);
return 1;
}
@@ -4350,6 +4356,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.rdtscp_supported = vmx_rdtscp_supported,
.set_supported_cpuid = vmx_set_supported_cpuid,
+
+ .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
};
static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d0b9252..cfb6fad 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1783,8 +1783,22 @@ out:
return r;
}
+static void wbinvd_ipi(void *garbage)
+{
+ wbinvd();
+}
+
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ /* Address WBINVD may be executed by guest */
+ if (vcpu->kvm->arch.iommu_domain) {
+ if (kvm_x86_ops->has_wbinvd_exit())
+ cpu_set(cpu, vcpu->arch.wbinvd_dirty_mask);
+ else if (vcpu->cpu != -1)
+ smp_call_function_single(vcpu->cpu,
+ wbinvd_ipi, NULL, 1);
+ }
+
kvm_x86_ops->vcpu_load(vcpu, cpu);
if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
unsigned long khz = cpufreq_quick_get(cpu);
@@ -3650,6 +3664,21 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
return X86EMUL_CONTINUE;
}
+int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->kvm->arch.iommu_domain)
+ return X86EMUL_CONTINUE;
+
+ if (kvm_x86_ops->has_wbinvd_exit()) {
+ smp_call_function_many(&vcpu->arch.wbinvd_dirty_mask,
+ wbinvd_ipi, NULL, 1);
+ cpus_clear(vcpu->arch.wbinvd_dirty_mask);
+ } else
+ wbinvd();
+ return X86EMUL_CONTINUE;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
+
int emulate_clts(struct kvm_vcpu *vcpu)
{
kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
--
1.7.0.1
next reply other threads:[~2010-06-28 3:38 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2010-06-28 3:36 Sheng Yang [this message]
2010-06-28 3:56 ` [PATCH v3] KVM: VMX: Execute WBINVD to keep data consistency with assigned devices Avi Kivity
2010-06-28 6:42 ` Sheng Yang
2010-06-28 6:56 ` Avi Kivity
2010-06-28 6:56 ` Sheng Yang
2010-06-28 7:08 ` Avi Kivity
2010-06-28 7:41 ` Sheng Yang
2010-06-28 8:07 ` Avi Kivity
2010-06-28 8:42 ` [PATCH v4] " Sheng Yang
2010-06-28 9:27 ` Avi Kivity
2010-06-28 9:31 ` Gleb Natapov
2010-06-28 9:35 ` Avi Kivity
2010-06-29 3:16 ` [PATCH v5] " Sheng Yang
2010-06-29 9:39 ` Avi Kivity
2010-06-29 10:32 ` Jan Kiszka
2010-06-29 10:42 ` Avi Kivity
2010-06-29 12:32 ` Roedel, Joerg
2010-06-29 12:37 ` Avi Kivity
2010-06-29 10:14 ` Roedel, Joerg
2010-06-29 10:44 ` Avi Kivity
2010-06-29 12:28 ` Roedel, Joerg
2010-06-29 12:35 ` Avi Kivity
2010-06-29 13:34 ` Roedel, Joerg
2010-06-29 13:25 ` Marcelo Tosatti
2010-06-29 13:28 ` Avi Kivity
2010-06-29 13:35 ` Marcelo Tosatti
2010-06-29 13:50 ` Avi Kivity
2010-06-29 14:31 ` Marcelo Tosatti
2010-06-28 7:30 ` [PATCH v3] " Dong, Eddie
2010-06-28 8:04 ` Avi Kivity
2010-06-28 8:16 ` Dong, Eddie
2010-06-28 8:45 ` Jan Kiszka
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1277696187-3571-1-git-send-email-sheng@linux.intel.com \
--to=sheng@linux.intel.com \
--cc=avi@redhat.com \
--cc=eddie.dong@intel.com \
--cc=jan.kiszka@siemens.com \
--cc=joerg.roedel@amd.com \
--cc=kvm@vger.kernel.org \
--cc=mtosatti@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox