public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Marcelo Tosatti <mtosatti@redhat.com>
To: Sheng Yang <sheng@linux.intel.com>
Cc: Avi Kivity <avi@redhat.com>, Jan Kiszka <jan.kiszka@siemens.com>,
	Joerg Roedel <joerg.roedel@amd.com>,
	kvm@vger.kernel.org, "Yaozu (Eddie) Dong" <eddie.dong@intel.com>
Subject: Re: [PATCH v5] KVM: VMX: Execute WBINVD to keep data consistency with assigned devices
Date: Tue, 29 Jun 2010 10:25:41 -0300	[thread overview]
Message-ID: <20100629132541.GB27338@amt.cnet> (raw)
In-Reply-To: <1277781419-13227-1-git-send-email-sheng@linux.intel.com>

On Tue, Jun 29, 2010 at 11:16:59AM +0800, Sheng Yang wrote:
> Some guest device drivers may leverage "Non-Snoop" I/O, and explicitly
> WBINVD or CLFLUSH to a RAM space. Since migration may occur before WBINVD or
> CLFLUSH, we need to maintain data consistency either by:
> 1: flushing cache (wbinvd) when the guest is scheduled out if there is no
> wbinvd exit, or
> 2: execute wbinvd on all dirty physical CPUs when guest wbinvd exits.
> 
> Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@intel.com>
> Signed-off-by: Sheng Yang <sheng@linux.intel.com>
> ---
>  arch/x86/include/asm/kvm_host.h |    6 +++++
>  arch/x86/kvm/emulate.c          |    5 +++-
>  arch/x86/kvm/svm.c              |    7 ++++++
>  arch/x86/kvm/vmx.c              |   10 ++++++++-
>  arch/x86/kvm/x86.c              |   41 +++++++++++++++++++++++++++++++++++++++
>  5 files changed, 67 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index a57cdea..2bda624 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -15,6 +15,7 @@
>  #include <linux/mm.h>
>  #include <linux/mmu_notifier.h>
>  #include <linux/tracepoint.h>
> +#include <linux/cpumask.h>
>  
>  #include <linux/kvm.h>
>  #include <linux/kvm_para.h>
> @@ -358,6 +359,8 @@ struct kvm_vcpu_arch {
>  
>  	/* fields used by HYPER-V emulation */
>  	u64 hv_vapic;
> +
> +	cpumask_var_t wbinvd_dirty_mask;
>  };
>  
>  struct kvm_arch {
> @@ -514,6 +517,8 @@ struct kvm_x86_ops {
>  
>  	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
>  
> +	bool (*has_wbinvd_exit)(void);
> +
>  	const struct trace_print_flags *exit_reasons_str;
>  };
>  
> @@ -571,6 +576,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
>  int kvm_emulate_halt(struct kvm_vcpu *vcpu);
>  int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
>  int emulate_clts(struct kvm_vcpu *vcpu);
> +int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
>  
>  void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
>  int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index abb8cec..e8bdddc 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -3138,8 +3138,11 @@ twobyte_insn:
>  		emulate_clts(ctxt->vcpu);
>  		c->dst.type = OP_NONE;
>  		break;
> -	case 0x08:		/* invd */
>  	case 0x09:		/* wbinvd */
> +		kvm_emulate_wbinvd(ctxt->vcpu);
> +		c->dst.type = OP_NONE;
> +		break;
> +	case 0x08:		/* invd */
>  	case 0x0d:		/* GrpP (prefetch) */
>  	case 0x18:		/* Grp16 (prefetch/nop) */
>  		c->dst.type = OP_NONE;
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 587b99d..56c9b6b 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -3424,6 +3424,11 @@ static bool svm_rdtscp_supported(void)
>  	return false;
>  }
>  
> +static bool svm_has_wbinvd_exit(void)
> +{
> +	return true;
> +}
> +
>  static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
> @@ -3508,6 +3513,8 @@ static struct kvm_x86_ops svm_x86_ops = {
>  	.rdtscp_supported = svm_rdtscp_supported,
>  
>  	.set_supported_cpuid = svm_set_supported_cpuid,
> +
> +	.has_wbinvd_exit = svm_has_wbinvd_exit,
>  };
>  
>  static int __init svm_init(void)
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index e565689..806ab12 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -412,6 +412,12 @@ static inline bool cpu_has_virtual_nmis(void)
>  	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
>  }
>  
> +static inline bool cpu_has_vmx_wbinvd_exit(void)
> +{
> +	return vmcs_config.cpu_based_2nd_exec_ctrl &
> +		SECONDARY_EXEC_WBINVD_EXITING;
> +}
> +
>  static inline bool report_flexpriority(void)
>  {
>  	return flexpriority_enabled;
> @@ -3400,7 +3406,7 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
>  static int handle_wbinvd(struct kvm_vcpu *vcpu)
>  {
>  	skip_emulated_instruction(vcpu);
> -	/* TODO: Add support for VT-d/pass-through device */
> +	kvm_emulate_wbinvd(vcpu);
>  	return 1;
>  }
>  
> @@ -4350,6 +4356,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
>  	.rdtscp_supported = vmx_rdtscp_supported,
>  
>  	.set_supported_cpuid = vmx_set_supported_cpuid,
> +
> +	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
>  };
>  
>  static int __init vmx_init(void)
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index d0b9252..9a400ae 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1783,8 +1783,28 @@ out:
>  	return r;
>  }
>  
> +static void wbinvd_ipi(void *garbage)
> +{
> +	wbinvd();
> +}
> +
> +static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
> +{
> +	return vcpu->kvm->arch.iommu_domain &&
> +		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
> +}
> +
>  void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  {
> +	/* Address WBINVD may be executed by guest */
> +	if (need_emulate_wbinvd(vcpu)) {
> +		if (kvm_x86_ops->has_wbinvd_exit())
> +			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
> +		else if (vcpu->cpu != -1)

					&& vcpu->cpu != cpu

> +			smp_call_function_single(vcpu->cpu,
> +					wbinvd_ipi, NULL, 1);
> +	}
> +
>  	kvm_x86_ops->vcpu_load(vcpu, cpu);
>  	if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
>  		unsigned long khz = cpufreq_quick_get(cpu);
> @@ -3650,6 +3670,21 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
>  	return X86EMUL_CONTINUE;
>  }
>  
> +int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
> +{
> +	if (!need_emulate_wbinvd(vcpu))
> +		return X86EMUL_CONTINUE;
> +
> +	if (kvm_x86_ops->has_wbinvd_exit()) {
> +		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
> +				wbinvd_ipi, NULL, 1);

Use a work_on_cpu() loop here instead of smp_call_function_many(), to avoid
executing wbinvd with interrupts disabled.


  parent reply	other threads:[~2010-06-29 13:26 UTC|newest]

Thread overview: 32+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2010-06-28  3:36 [PATCH v3] KVM: VMX: Execute WBINVD to keep data consistency with assigned devices Sheng Yang
2010-06-28  3:56 ` Avi Kivity
2010-06-28  6:42   ` Sheng Yang
2010-06-28  6:56     ` Avi Kivity
2010-06-28  6:56       ` Sheng Yang
2010-06-28  7:08         ` Avi Kivity
2010-06-28  7:41           ` Sheng Yang
2010-06-28  8:07             ` Avi Kivity
2010-06-28  8:42               ` [PATCH v4] " Sheng Yang
2010-06-28  9:27                 ` Avi Kivity
2010-06-28  9:31                   ` Gleb Natapov
2010-06-28  9:35                     ` Avi Kivity
2010-06-29  3:16                       ` [PATCH v5] " Sheng Yang
2010-06-29  9:39                         ` Avi Kivity
2010-06-29 10:32                           ` Jan Kiszka
2010-06-29 10:42                             ` Avi Kivity
2010-06-29 12:32                               ` Roedel, Joerg
2010-06-29 12:37                                 ` Avi Kivity
2010-06-29 10:14                         ` Roedel, Joerg
2010-06-29 10:44                           ` Avi Kivity
2010-06-29 12:28                             ` Roedel, Joerg
2010-06-29 12:35                               ` Avi Kivity
2010-06-29 13:34                                 ` Roedel, Joerg
2010-06-29 13:25                         ` Marcelo Tosatti [this message]
2010-06-29 13:28                           ` Avi Kivity
2010-06-29 13:35                             ` Marcelo Tosatti
2010-06-29 13:50                               ` Avi Kivity
2010-06-29 14:31                                 ` Marcelo Tosatti
2010-06-28  7:30       ` [PATCH v3] " Dong, Eddie
2010-06-28  8:04         ` Avi Kivity
2010-06-28  8:16           ` Dong, Eddie
2010-06-28  8:45             ` Jan Kiszka

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20100629132541.GB27338@amt.cnet \
    --to=mtosatti@redhat.com \
    --cc=avi@redhat.com \
    --cc=eddie.dong@intel.com \
    --cc=jan.kiszka@siemens.com \
    --cc=joerg.roedel@amd.com \
    --cc=kvm@vger.kernel.org \
    --cc=sheng@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox