From: Eugene Korenevsky
Subject: [PATCH 1/5] KVM: nVMX: refactor segment checks, make the code cleaner and more straightforward
Date: Thu, 20 Aug 2015 22:36:25 +0300
Message-ID: <20150820193625.GA3521@unote>
To: kvm@vger.kernel.org
Cc: Paolo Bonzini

Prepare for subsequent changes. Extract the segment checks for protected
mode and 64-bit mode into separate helper functions. This keeps
get_vmx_mem_address() from growing too large, even though
kvm_queue_exception_e() ends up being called from two places.

Signed-off-by: Eugene Korenevsky
---
 arch/x86/kvm/vmx.c | 106 +++++++++++++++++++++++++++++------------------------
 1 file changed, 59 insertions(+), 47 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index da1590e..32d2979 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6335,6 +6335,59 @@ static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+/* Long mode: #GP(0)/#SS(0) if the memory address is in
+ * a non-canonical form.
+ */
+static int vmx_longmode_seg_check(struct kvm_vcpu *vcpu, int seg_reg, gva_t la)
+{
+	if (is_noncanonical_address(la)) {
+		kvm_queue_exception_e(vcpu,
+				      seg_reg == VCPU_SREG_SS ?
+					SS_VECTOR : GP_VECTOR,
+				      0);
+		return 1;
+	}
+	return 0;
+}
+
+/* Protected mode: apply checks for segment validity in the following order:
+ * - segment type check (#GP(0) may be thrown)
+ * - usability check (#GP(0)/#SS(0))
+ * - limit check (#GP(0)/#SS(0))
+ */
+static int vmx_protmode_seg_check(struct kvm_vcpu *vcpu,
+				  int seg, const struct kvm_segment *s,
+				  bool wr, int mem_op_size, gva_t off)
+{
+	bool exn;
+
+	/* #GP(0) if the destination operand is located in a read-only data
+	 * segment or any code segment.
+	 * #GP(0) if the source operand is located in an execute-only code
+	 * segment.
+	 */
+	if (wr)
+		exn = ((s->type & 0xa) == 0 || (s->type & 8));
+	else
+		exn = ((s->type & 0xa) == 8);
+	if (exn) {
+		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+		return 1;
+	}
+	/* #GP(0)/#SS(0) if the segment is unusable. */
+	exn = (s->unusable != 0);
+	/* #GP(0)/#SS(0) if the memory operand is outside the segment limit. */
+	exn = exn || (off + mem_op_size > s->limit);
+	if (exn) {
+		kvm_queue_exception_e(vcpu,
+				      seg == VCPU_SREG_SS ?
+					SS_VECTOR : GP_VECTOR,
+				      0);
+		return 1;
+	}
+	return 0;
+}
+
 /*
  * Decode the memory-address operand of a vmx instruction, as recorded on an
  * exit caused by such an instruction (run by a guest hypervisor).
@@ -6346,7 +6399,6 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 			       u32 vmx_instruction_info, bool wr, gva_t *ret)
 {
 	gva_t off;
-	bool exn;
 	struct kvm_segment s;
 
 	/*
@@ -6381,55 +6433,15 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 	vmx_get_segment(vcpu, &s, seg_reg);
 	*ret = s.base + off;
 
+	if (is_long_mode(vcpu))
+		return vmx_longmode_seg_check(vcpu, seg_reg, *ret);
+
 	if (addr_size == 1) /* 32 bit */
 		*ret &= 0xffffffff;
 
-	/* Checks for #GP/#SS exceptions. */
-	exn = false;
-	if (is_protmode(vcpu)) {
-		/* Protected mode: apply checks for segment validity in the
-		 * following order:
-		 * - segment type check (#GP(0) may be thrown)
-		 * - usability check (#GP(0)/#SS(0))
-		 * - limit check (#GP(0)/#SS(0))
-		 */
-		if (wr)
-			/* #GP(0) if the destination operand is located in a
-			 * read-only data segment or any code segment.
-			 */
-			exn = ((s.type & 0xa) == 0 || (s.type & 8));
-		else
-			/* #GP(0) if the source operand is located in an
			 * execute-only code segment
-			 */
-			exn = ((s.type & 0xa) == 8);
-	}
-	if (exn) {
-		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-		return 1;
-	}
-	if (is_long_mode(vcpu)) {
-		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
-		 * non-canonical form. This is an only check for long mode.
-		 */
-		exn = is_noncanonical_address(*ret);
-	} else if (is_protmode(vcpu)) {
-		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
-		 */
-		exn = (s.unusable != 0);
-		/* Protected mode: #GP(0)/#SS(0) if the memory
-		 * operand is outside the segment limit.
-		 */
-		exn = exn || (off + sizeof(u64) > s.limit);
-	}
-	if (exn) {
-		kvm_queue_exception_e(vcpu,
-				      seg_reg == VCPU_SREG_SS ?
-					SS_VECTOR : GP_VECTOR,
-				      0);
-		return 1;
-	}
-
+	if (is_protmode(vcpu))
+		return vmx_protmode_seg_check(vcpu, seg_reg, &s,
+					      wr, sizeof(u64), off);
 	return 0;
 }
 
-- 
2.1.4
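
For reference, a minimal user-space sketch (not part of the patch; the
program and its output format are purely illustrative) that evaluates the
same type-bit expressions used in vmx_protmode_seg_check(). In the segment
descriptor type field, bit 3 (0x8) distinguishes code from data segments and
bit 1 (0x2) is the writable bit for data segments or the readable bit for
code segments, so (type & 0xa) == 0 is a read-only data segment and
(type & 0xa) == 8 is an execute-only code segment, matching the comments in
the patch.

/* Standalone illustration, not kernel code: print, for every 4-bit segment
 * type, whether a write or a read through such a segment would raise #GP(0)
 * according to the checks added in vmx_protmode_seg_check().
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int type;

	for (type = 0; type < 16; type++) {
		/* Same expressions as in the patch. */
		bool wr_faults = ((type & 0xa) == 0) || (type & 8);
		bool rd_faults = ((type & 0xa) == 8);

		printf("type %2u (%s): write %s, read %s\n",
		       type,
		       (type & 8) ? "code" : "data",
		       wr_faults ? "#GP" : "ok",
		       rd_faults ? "#GP" : "ok");
	}
	return 0;
}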