kvm.vger.kernel.org archive mirror
* [PATCH 4/4 v4] KVM: VMX: VMXON/VMXOFF usage changes.
@ 2010-05-11 10:29 Xu, Dongxiao
  2010-05-12  0:43 ` Marcelo Tosatti
  0 siblings, 1 reply; 4+ messages in thread
From: Xu, Dongxiao @ 2010-05-11 10:29 UTC (permalink / raw)
  To: kvm@vger.kernel.org; +Cc: Avi Kivity, Marcelo Tosatti, Alexander Graf

From: Dongxiao Xu <dongxiao.xu@intel.com>

The SDM suggests that VMXON should be executed before VMPTRLD, and
VMXOFF should be executed after VMCLEAR.

Therefore, in the VMM-coexistence case, call VMXON before any VMCS
operation and VMXOFF once the operation is done.

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
---
 arch/x86/kvm/vmx.c |   38 +++++++++++++++++++++++++++++++-------
 1 files changed, 31 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c536b9d..dbd47a7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -168,6 +168,8 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
 
 static int init_rmode(struct kvm *kvm);
 static u64 construct_eptp(unsigned long root_hpa);
+static void kvm_cpu_vmxon(u64 addr);
+static void kvm_cpu_vmxoff(void);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -786,8 +788,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 tsc_this, delta, new_offset;
+	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
 
-	if (vmm_exclusive && vcpu->cpu != cpu)
+	if (!vmm_exclusive)
+		kvm_cpu_vmxon(phys_addr);
+	else if (vcpu->cpu != cpu)
 		vcpu_clear(vmx);
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
@@ -833,8 +838,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	__vmx_load_host_state(to_vmx(vcpu));
-	if (!vmm_exclusive)
+	if (!vmm_exclusive) {
 		__vcpu_clear(to_vmx(vcpu));
+		kvm_cpu_vmxoff();
+	}
 }
 
 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
@@ -1257,9 +1264,11 @@ static int hardware_enable(void *garbage)
 		       FEATURE_CONTROL_LOCKED |
 		       FEATURE_CONTROL_VMXON_ENABLED);
 	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
-	kvm_cpu_vmxon(phys_addr);
 
-	ept_sync_global();
+	if (vmm_exclusive) {
+		kvm_cpu_vmxon(phys_addr);
+		ept_sync_global();
+	}
 
 	return 0;
 }
@@ -1285,8 +1294,10 @@ static void kvm_cpu_vmxoff(void)
 
 static void hardware_disable(void *garbage)
 {
-	vmclear_local_vcpus();
-	kvm_cpu_vmxoff();
+	if (vmm_exclusive) {
+		vmclear_local_vcpus();
+		kvm_cpu_vmxoff();
+	}
 	write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
@@ -3949,6 +3960,19 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	kmem_cache_free(kvm_vcpu_cache, vmx);
 }
 
+static inline void vmcs_init(struct vmcs *vmcs)
+{
+	u64 phys_addr = __pa(per_cpu(vmxarea, raw_smp_processor_id()));
+
+	if (!vmm_exclusive)
+		kvm_cpu_vmxon(phys_addr);
+
+	vmcs_clear(vmcs);
+
+	if (!vmm_exclusive)
+		kvm_cpu_vmxoff();
+}
+
 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 {
 	int err;
@@ -3974,7 +3998,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (!vmx->vmcs)
 		goto free_msrs;
 
-	vmcs_clear(vmx->vmcs);
+	vmcs_init(vmx->vmcs);
 
 	cpu = get_cpu();
 	vmx_vcpu_load(&vmx->vcpu, cpu);
-- 
1.6.3
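
For context, kvm_cpu_vmxon() and kvm_cpu_vmxoff(), forward-declared above
so they can be called earlier in the file, are thin wrappers around the
VMXON and VMXOFF instructions. A minimal sketch of their shape (the asm
constraints here are an assumption, not a quote of the actual vmx.c
bodies):

	/* Minimal sketch, not the actual vmx.c bodies: enter VMX operation
	 * on this CPU.  VMXON takes the 64-bit physical address of the
	 * per-CPU VMXON region as a memory operand. */
	static void kvm_cpu_vmxon(u64 addr)
	{
		asm volatile ("vmxon %0" : : "m" (addr) : "memory", "cc");
	}

	/* Minimal sketch: leave VMX operation on this CPU. */
	static void kvm_cpu_vmxoff(void)
	{
		asm volatile ("vmxoff" : : : "cc");
	}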

* Re: [PATCH 4/4 v4] KVM: VMX: VMXON/VMXOFF usage changes.
  2010-05-11 10:29 [PATCH 4/4 v4] KVM: VMX: VMXON/VMXOFF usage changes Xu, Dongxiao
@ 2010-05-12  0:43 ` Marcelo Tosatti
  2010-05-12  6:13   ` Xu, Dongxiao
  0 siblings, 1 reply; 4+ messages in thread
From: Marcelo Tosatti @ 2010-05-12  0:43 UTC (permalink / raw)
  To: Xu, Dongxiao; +Cc: kvm@vger.kernel.org, Avi Kivity, Alexander Graf

On Tue, May 11, 2010 at 06:29:48PM +0800, Xu, Dongxiao wrote:
> From: Dongxiao Xu <dongxiao.xu@intel.com>
> 
> The SDM suggests that VMXON should be executed before VMPTRLD, and
> VMXOFF should be executed after VMCLEAR.
> 
> Therefore, in the VMM-coexistence case, call VMXON before any VMCS
> operation and VMXOFF once the operation is done.
> 
> Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
> ---
>  arch/x86/kvm/vmx.c |   38 +++++++++++++++++++++++++++++++-------
>  1 files changed, 31 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index c536b9d..dbd47a7 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -168,6 +168,8 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
>  
>  static int init_rmode(struct kvm *kvm);
>  static u64 construct_eptp(unsigned long root_hpa);
> +static void kvm_cpu_vmxon(u64 addr);
> +static void kvm_cpu_vmxoff(void);
>  
>  static DEFINE_PER_CPU(struct vmcs *, vmxarea);
>  static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
> @@ -786,8 +788,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	u64 tsc_this, delta, new_offset;
> +	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
>  
> -	if (vmm_exclusive && vcpu->cpu != cpu)
> +	if (!vmm_exclusive)
> +		kvm_cpu_vmxon(phys_addr);
> +	else if (vcpu->cpu != cpu)
>  		vcpu_clear(vmx);
>  
>  	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
> @@ -833,8 +838,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
>  {
>  	__vmx_load_host_state(to_vmx(vcpu));
> -	if (!vmm_exclusive)
> +	if (!vmm_exclusive) {
>  		__vcpu_clear(to_vmx(vcpu));
> +		kvm_cpu_vmxoff();
> +	}
>  }
>  
>  static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
> @@ -1257,9 +1264,11 @@ static int hardware_enable(void *garbage)
>  		       FEATURE_CONTROL_LOCKED |
>  		       FEATURE_CONTROL_VMXON_ENABLED);
>  	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
> -	kvm_cpu_vmxon(phys_addr);
>  
> -	ept_sync_global();
> +	if (vmm_exclusive) {
> +		kvm_cpu_vmxon(phys_addr);
> +		ept_sync_global();
> +	}
>  
>  	return 0;

The documentation recommends usage of INVEPT all-context after execution
of VMXON and prior to execution of VMXOFF. Is it not necessary?
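
For reference, the all-context INVEPT in question is what ept_sync_global()
issues; a sketch, assuming it matches this era's
cpu_has_vmx_invept_global()/__invept() helpers:

	/* Sketch: flush combined EPT mappings for all contexts, if the
	 * CPU supports global INVEPT. */
	static inline void ept_sync_global(void)
	{
		if (cpu_has_vmx_invept_global())
			__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
	}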

* RE: [PATCH 4/4 v4] KVM: VMX: VMXON/VMXOFF usage changes.
  2010-05-12  0:43 ` Marcelo Tosatti
@ 2010-05-12  6:13   ` Xu, Dongxiao
  2010-05-12 19:58     ` Marcelo Tosatti
  0 siblings, 1 reply; 4+ messages in thread
From: Xu, Dongxiao @ 2010-05-12  6:13 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: kvm@vger.kernel.org, Avi Kivity, Alexander Graf

Marcelo Tosatti wrote:
> On Tue, May 11, 2010 at 06:29:48PM +0800, Xu, Dongxiao wrote:
>> From: Dongxiao Xu <dongxiao.xu@intel.com>
>> 
>> The SDM suggests that VMXON should be executed before VMPTRLD, and
>> VMXOFF should be executed after VMCLEAR.
>> 
>> Therefore, in the VMM-coexistence case, call VMXON before any VMCS
>> operation and VMXOFF once the operation is done.
>> 
>> Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
>> ---
>>  arch/x86/kvm/vmx.c |   38 +++++++++++++++++++++++++++++++-------
>>  1 files changed, 31 insertions(+), 7 deletions(-)
>> 
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index c536b9d..dbd47a7 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -168,6 +168,8 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
>> 
>>  static int init_rmode(struct kvm *kvm);
>>  static u64 construct_eptp(unsigned long root_hpa);
>> +static void kvm_cpu_vmxon(u64 addr);
>> +static void kvm_cpu_vmxoff(void);
>> 
>>  static DEFINE_PER_CPU(struct vmcs *, vmxarea);
>>  static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
>> @@ -786,8 +788,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>  {
>>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>>  	u64 tsc_this, delta, new_offset;
>> +	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
>> 
>> -	if (vmm_exclusive && vcpu->cpu != cpu)
>> +	if (!vmm_exclusive)
>> +		kvm_cpu_vmxon(phys_addr);
>> +	else if (vcpu->cpu != cpu)
>>  		vcpu_clear(vmx);
>> 
>>  	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
>> @@ -833,8 +838,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>  static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
>>  {
>>  	__vmx_load_host_state(to_vmx(vcpu));
>> -	if (!vmm_exclusive)
>> +	if (!vmm_exclusive) {
>>  		__vcpu_clear(to_vmx(vcpu));
>> +		kvm_cpu_vmxoff();
>> +	}
>>  }
>> 
>>  static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
>> @@ -1257,9 +1264,11 @@ static int hardware_enable(void *garbage)
>>  		       FEATURE_CONTROL_LOCKED |
>>  		       FEATURE_CONTROL_VMXON_ENABLED);
>>  	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
>> -	kvm_cpu_vmxon(phys_addr);
>> 
>> -	ept_sync_global();
>> +	if (vmm_exclusive) {
>> +		kvm_cpu_vmxon(phys_addr);
>> +		ept_sync_global();
>> +	}
>> 
>>  	return 0;
> 
> The documentation recommends usage of INVEPT all-context after
> execution of VMXON and prior to execution of VMXOFF. Is it not
> necessary? 

With this patch applied, when a vCPU is scheduled onto a CPU, it calls
tlb_flush() to invalidate the EPT and VPID caches/TLB entries for that
vCPU. Therefore correctness for KVM is guaranteed.
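
For reference, a sketch of the flush path, assuming it matches this era's
vmx_flush_tlb() and its helpers:

	/* Sketch: invalidate VPID-tagged mappings for this vCPU and,
	 * when EPT is enabled, the EPT context for its current root. */
	static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
	{
		vpid_sync_vcpu_all(to_vmx(vcpu));
		if (enable_ept)
			ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
	}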

Thanks,
Dongxiao

* Re: [PATCH 4/4 v4] KVM: VMX: VMXON/VMXOFF usage changes.
  2010-05-12  6:13   ` Xu, Dongxiao
@ 2010-05-12 19:58     ` Marcelo Tosatti
  0 siblings, 0 replies; 4+ messages in thread
From: Marcelo Tosatti @ 2010-05-12 19:58 UTC (permalink / raw)
  To: Xu, Dongxiao; +Cc: kvm@vger.kernel.org, Avi Kivity, Alexander Graf

On Wed, May 12, 2010 at 02:13:26PM +0800, Xu, Dongxiao wrote:
> >> -	ept_sync_global();
> >> +	if (vmm_exclusive) {
> >> +		kvm_cpu_vmxon(phys_addr);
> >> +		ept_sync_global();
> >> +	}
> >> 
> >>  	return 0;
> > 
> > The documentation recommends usage of INVEPT all-context after
> > execution of VMXON and prior to execution of VMXOFF. Is it not
> > necessary? 
> 
> After adding the patch, when vCPU is scheduled in a CPU, it will call
> tlb_flush() to invalidate the EPT and VPID cache/tlb for the vCPU.
> Therefore the correctness for KVM is guaranteed. 

Correct. Applied all, thanks.


