* [PATCH 1/5] KVM: nVMX: refactor segment checks, make the code cleaner and more straightforward
@ 2015-08-20 19:36 Eugene Korenevsky
  2015-09-07 11:37 ` Paolo Bonzini
  0 siblings, 1 reply; 2+ messages in thread
From: Eugene Korenevsky @ 2015-08-20 19:36 UTC (permalink / raw)
  To: kvm; +Cc: Paolo Bonzini

Prepare for subsequent changes. Extract the segment checks for protected
mode and 64-bit mode into helper functions. This keeps
get_vmx_mem_address() from bloating, even though kvm_queue_exception_e()
is now called in two places.

Signed-off-by: Eugene Korenevsky <ekorenevsky@gmail.com>
---
 arch/x86/kvm/vmx.c | 106 +++++++++++++++++++++++++++++------------------------
 1 file changed, 59 insertions(+), 47 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index da1590e..32d2979 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6335,6 +6335,59 @@ static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+/* Long mode: #GP(0)/#SS(0) if the memory address is in
+ * a non-canonical form.
+ */
+static int vmx_longmode_seg_check(struct kvm_vcpu *vcpu, int seg_reg, gva_t la)
+{
+	if (is_noncanonical_address(la)) {
+		kvm_queue_exception_e(vcpu,
+				      seg_reg == VCPU_SREG_SS ?
+						SS_VECTOR : GP_VECTOR,
+				      0);
+		return 1;
+	}
+	return 0;
+}
+
+/* Protected mode: apply checks for segment validity in the following order:
+ * - segment type check (#GP(0) may be thrown)
+ * - usability check (#GP(0)/#SS(0))
+ * - limit check (#GP(0)/#SS(0))
+ */
+static int vmx_protmode_seg_check(struct kvm_vcpu *vcpu,
+				  int seg, const struct kvm_segment *s,
+				  bool wr, int mem_op_size, gva_t off)
+{
+	bool exn;
+
+	/* #GP(0) if the destination operand is located in a read-only data
+	 * segment or any code segment.
+	 * #GP(0) if the source operand is located in an execute-only code
+	 * segment.
+	 */
+	if (wr)
+		exn = ((s->type & 0xa) == 0 || (s->type & 8));
+	else
+		exn = ((s->type & 0xa) == 8);
+	if (exn) {
+		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+		return 1;
+	}
+	/* #GP(0)/#SS(0) if the segment is unusable. */
+	exn = (s->unusable != 0);
+	/* #GP(0)/#SS(0) if the memory operand is outside the segment limit. */
+	exn = exn || (off + mem_op_size > s->limit);
+	if (exn) {
+		kvm_queue_exception_e(vcpu,
+				      seg == VCPU_SREG_SS ?
+						SS_VECTOR : GP_VECTOR,
+				      0);
+		return 1;
+	}
+	return 0;
+}
+
 /*
  * Decode the memory-address operand of a vmx instruction, as recorded on an
  * exit caused by such an instruction (run by a guest hypervisor).
@@ -6346,7 +6399,6 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 				 u32 vmx_instruction_info, bool wr, gva_t *ret)
 {
 	gva_t off;
-	bool exn;
 	struct kvm_segment s;
 
 	/*
@@ -6381,55 +6433,15 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 	vmx_get_segment(vcpu, &s, seg_reg);
 	*ret = s.base + off;
 
+	if (is_long_mode(vcpu))
+		return vmx_longmode_seg_check(vcpu, seg_reg, *ret);
+
 	if (addr_size == 1) /* 32 bit */
 		*ret &= 0xffffffff;
 
-	/* Checks for #GP/#SS exceptions. */
-	exn = false;
-	if (is_protmode(vcpu)) {
-		/* Protected mode: apply checks for segment validity in the
-		 * following order:
-		 * - segment type check (#GP(0) may be thrown)
-		 * - usability check (#GP(0)/#SS(0))
-		 * - limit check (#GP(0)/#SS(0))
-		 */
-		if (wr)
-			/* #GP(0) if the destination operand is located in a
-			 * read-only data segment or any code segment.
-			 */
-			exn = ((s.type & 0xa) == 0 || (s.type & 8));
-		else
-			/* #GP(0) if the source operand is located in an
-			 * execute-only code segment
-			 */
-			exn = ((s.type & 0xa) == 8);
-	}
-	if (exn) {
-		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-		return 1;
-	}
-	if (is_long_mode(vcpu)) {
-		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
-		 * non-canonical form. This is an only check for long mode.
-		 */
-		exn = is_noncanonical_address(*ret);
-	} else if (is_protmode(vcpu)) {
-		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
-		 */
-		exn = (s.unusable != 0);
-		/* Protected mode: #GP(0)/#SS(0) if the memory
-		 * operand is outside the segment limit.
-		 */
-		exn = exn || (off + sizeof(u64) > s.limit);
-	}
-	if (exn) {
-		kvm_queue_exception_e(vcpu,
-				      seg_reg == VCPU_SREG_SS ?
-						SS_VECTOR : GP_VECTOR,
-				      0);
-		return 1;
-	}
-
+	if (is_protmode(vcpu))
+		return vmx_protmode_seg_check(vcpu, seg_reg, &s,
+					      wr, sizeof(u64), off);
 	return 0;
 }
 
-- 
2.1.4
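
For context, a minimal sketch of how a VMX instruction handler typically calls
get_vmx_mem_address() on such an exit. The snippet is illustrative only: the
kvm_read_guest_virt()/kvm_inject_page_fault() follow-up is an assumption about
the surrounding handler code of that era, not part of this patch.

	gva_t gva;
	struct x86_exception e;
	u64 vmptr;

	/* Decode the memory operand; on failure the helper has already
	 * queued #GP(0) or #SS(0) for the guest, so just resume it.
	 */
	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
				vmcs_read32(VMX_INSTRUCTION_INFO),
				false, &gva))
		return 1;
	/* Read the operand through the decoded guest virtual address. */
	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
				sizeof(vmptr), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}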



* Re: [PATCH 1/5] KVM: nVMX: refactor segment checks, make the code cleaner and more straightforward
  2015-08-20 19:36 [PATCH 1/5] KVM: nVMX: refactor segment checks, make the code cleaner and more straightforward Eugene Korenevsky
@ 2015-09-07 11:37 ` Paolo Bonzini
  0 siblings, 0 replies; 2+ messages in thread
From: Paolo Bonzini @ 2015-09-07 11:37 UTC (permalink / raw)
  To: Eugene Korenevsky, kvm



On 20/08/2015 21:36, Eugene Korenevsky wrote:
> Prepare for subsequent changes. Extract the segment checks for protected
> mode and 64-bit mode into helper functions. This keeps
> get_vmx_mem_address() from bloating, even though kvm_queue_exception_e()
> is now called in two places.

The idea behind the patch is okay, but all VMX root-mode instructions
require CR0.PE = 1, so vmx_protmode_seg_check must always be run.
Please adjust nested_vmx_check_permission to check CR0.PE as well.

Paolo
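
A minimal sketch of the kind of CR0.PE check being asked for in
nested_vmx_check_permission(). The elided existing checks and the exact
return convention (1 = allowed to proceed, 0 = exception queued) are
assumptions for illustration, not taken from this thread.

static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
	/* ... existing "is the vCPU in VMX operation?" and CPL checks ... */

	/* All VMX instructions raise #UD when CR0.PE = 0, so reject the
	 * instruction here; the protected-mode segment checks can then run
	 * unconditionally in get_vmx_mem_address().
	 */
	if (!is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	return 1;
}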

