From mboxrd@z Thu Jan 1 00:00:00 1970 From: Paolo Bonzini Subject: Re: [PATCH v2 1/2] KVM: nVMX: Validate EFER values for VM_ENTRY/EXIT_LOAD_IA32_EFER Date: Mon, 22 Apr 2013 10:51:33 +0200 Message-ID: <5174FA15.9050308@redhat.com> References: <516A8879.6050202@web.de> <51725754.2010304@web.de> Mime-Version: 1.0 Content-Type: text/plain; charset=windows-1252 Content-Transfer-Encoding: QUOTED-PRINTABLE Cc: Gleb Natapov , Marcelo Tosatti , kvm , "Nadav Har'El" To: Jan Kiszka Return-path: Received: from mx1.redhat.com ([209.132.183.28]:59620 "EHLO mx1.redhat.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1754760Ab3DVIvw (ORCPT ); Mon, 22 Apr 2013 04:51:52 -0400 In-Reply-To: <51725754.2010304@web.de> Sender: kvm-owner@vger.kernel.org List-ID: Il 20/04/2013 10:52, Jan Kiszka ha scritto: > As we may emulate the loading of EFER on VM-entry and VM-exit, implem= ent > the checks that VMX performs on the guest and host values on vmlaunch= / > vmresume. Factor out kvm_valid_efer for this purpose which checks for > set reserved bits. 
>=20 > Signed-off-by: Jan Kiszka > --- >=20 > Changes in v2: > - refactored if clauses as requested by Paolo > - fixed typo in comment found by Marcelo >=20 > arch/x86/include/asm/kvm_host.h | 1 + > arch/x86/kvm/vmx.c | 40 +++++++++++++++++++++++++++++= ++++++++++ > arch/x86/kvm/x86.c | 29 ++++++++++++++++++--------- > 3 files changed, 60 insertions(+), 10 deletions(-) >=20 > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/k= vm_host.h > index 599f98b..18635ae 100644 > --- a/arch/x86/include/asm/kvm_host.h > +++ b/arch/x86/include/asm/kvm_host.h > @@ -809,6 +809,7 @@ static inline int emulate_instruction(struct kvm_= vcpu *vcpu, > } > =20 > void kvm_enable_efer_bits(u64); > +bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); > int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data); > int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr); > =20 > diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c > index 19aebc7..e3b951f 100644 > --- a/arch/x86/kvm/vmx.c > +++ b/arch/x86/kvm/vmx.c > @@ -7327,6 +7327,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu= , bool launch) > struct vcpu_vmx *vmx =3D to_vmx(vcpu); > int cpu; > struct loaded_vmcs *vmcs02; > + bool ia32e; > =20 > if (!nested_vmx_check_permission(vcpu) || > !nested_vmx_check_vmcs12(vcpu)) > @@ -7415,6 +7416,45 @@ static int nested_vmx_run(struct kvm_vcpu *vcp= u, bool launch) > } > =20 > /* > + * If the =93load IA32_EFER=94 VM-entry control is 1, the following= checks > + * are performed on the field for the IA32_EFER MSR: > + * - Bits reserved in the IA32_EFER MSR must be 0. > + * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value o= f > + * the IA-32e mode guest VM-exit control. It must also be identic= al > + * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to > + * CR0.PG) is 1. 
> + */ > + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) { > + ia32e =3D (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) !=3D 0= ; > + if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || > + ia32e !=3D !!(vmcs12->guest_ia32_efer & EFER_LMA) || > + ((vmcs12->guest_cr0 & X86_CR0_PG) && > + ia32e !=3D !!(vmcs12->guest_ia32_efer & EFER_LME))) { > + nested_vmx_entry_failure(vcpu, vmcs12, > + EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); > + return 1; > + } > + } > + > + /* > + * If the load IA32_EFER VM-exit control is 1, bits reserved in the > + * IA32_EFER MSR must be 0 in the field for that register. In addit= ion, > + * the values of the LMA and LME bits in the field must each be tha= t of > + * the host address-space size VM-exit control. > + */ > + if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { > + ia32e =3D (vmcs12->vm_exit_controls & > + VM_EXIT_HOST_ADDR_SPACE_SIZE) !=3D 0; > + if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || > + ia32e !=3D !!(vmcs12->host_ia32_efer & EFER_LMA) || > + ia32e !=3D !!(vmcs12->host_ia32_efer & EFER_LME)) { > + nested_vmx_entry_failure(vcpu, vmcs12, > + EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); > + return 1; > + } > + } Looks good, difficult to do better with C's operator precedence rules. Paolo > + /* > * We're finally done with prerequisite checking, and can start wit= h > * the nested entry. 
> */ > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c > index 50e2e10..482784d 100644 > --- a/arch/x86/kvm/x86.c > +++ b/arch/x86/kvm/x86.c > @@ -845,23 +845,17 @@ static const u32 emulated_msrs[] =3D { > MSR_IA32_MCG_CTL, > }; > =20 > -static int set_efer(struct kvm_vcpu *vcpu, u64 efer) > +bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) > { > - u64 old_efer =3D vcpu->arch.efer; > - > if (efer & efer_reserved_bits) > - return 1; > - > - if (is_paging(vcpu) > - && (vcpu->arch.efer & EFER_LME) !=3D (efer & EFER_LME)) > - return 1; > + return false; > =20 > if (efer & EFER_FFXSR) { > struct kvm_cpuid_entry2 *feat; > =20 > feat =3D kvm_find_cpuid_entry(vcpu, 0x80000001, 0); > if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) > - return 1; > + return false; > } > =20 > if (efer & EFER_SVME) { > @@ -869,9 +863,24 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 e= fer) > =20 > feat =3D kvm_find_cpuid_entry(vcpu, 0x80000001, 0); > if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) > - return 1; > + return false; > } > =20 > + return true; > +} > +EXPORT_SYMBOL_GPL(kvm_valid_efer); > + > +static int set_efer(struct kvm_vcpu *vcpu, u64 efer) > +{ > + u64 old_efer =3D vcpu->arch.efer; > + > + if (!kvm_valid_efer(vcpu, efer)) > + return 1; > + > + if (is_paging(vcpu) > + && (vcpu->arch.efer & EFER_LME) !=3D (efer & EFER_LME)) > + return 1; > + > efer &=3D ~EFER_LMA; > efer |=3D vcpu->arch.efer & EFER_LMA; > =20 >=20