linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v2 0/2] Series short description
@ 2015-03-02 18:04 Joel Schopp
  2015-03-02 18:04 ` [PATCH v2 1/2] kvm: x86: make kvm_emulate_* consistant Joel Schopp
  2015-03-02 18:04 ` [PATCH v2 2/2] x86: svm: make wbinvd faster Joel Schopp
  0 siblings, 2 replies; 6+ messages in thread
From: Joel Schopp @ 2015-03-02 18:04 UTC (permalink / raw)
  To: Gleb Natapov, Paolo Bonzini, kvm
  Cc: Joerg Roedel, Borislav Petkov, linux-kernel, David Kaplan,
	rkrcmar

Review comments from v1 that used kvm_emulate_wbinvd() pointed out that 
kvm_emulate_* was inconsistent in using skipping, while kvm_emulate() always
skips.  The first patch cleans up the existing use while the second patch
adds use of the updated version of kvm_emulate_wbinvd() in svm.

---

Joel Schopp (2):
      kvm: x86: make kvm_emulate_* consistant
      x86: svm: make wbinvd faster


 arch/x86/kvm/svm.c |   11 ++++++++---
 arch/x86/kvm/vmx.c |    9 +++------
 arch/x86/kvm/x86.c |   23 ++++++++++++++++++++---
 3 files changed, 31 insertions(+), 12 deletions(-)

--


^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH v2 1/2] kvm: x86: make kvm_emulate_* consistant
  2015-03-02 18:04 [PATCH v2 0/2] Series short description Joel Schopp
@ 2015-03-02 18:04 ` Joel Schopp
  2015-03-02 19:04   ` Radim Krčmář
  2015-03-02 18:04 ` [PATCH v2 2/2] x86: svm: make wbinvd faster Joel Schopp
  1 sibling, 1 reply; 6+ messages in thread
From: Joel Schopp @ 2015-03-02 18:04 UTC (permalink / raw)
  To: Gleb Natapov, Paolo Bonzini, kvm
  Cc: Joerg Roedel, Borislav Petkov, linux-kernel, David Kaplan,
	rkrcmar

Currently kvm_emulate() skips the instruction but kvm_emulate_* sometimes
don't.  The end result is the caller ends up doing the skip themselves.
Let's make them consistent.

Signed-off-by: Joel Schopp <joel.schopp@amd.com>
---
 arch/x86/kvm/svm.c |    2 --
 arch/x86/kvm/vmx.c |    9 +++------
 arch/x86/kvm/x86.c |   23 ++++++++++++++++++++---
 3 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d319e0c..0c9e377 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1929,14 +1929,12 @@ static int nop_on_interception(struct vcpu_svm *svm)
 static int halt_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
-	skip_emulated_instruction(&svm->vcpu);
 	return kvm_emulate_halt(&svm->vcpu);
 }
 
 static int vmmcall_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
-	skip_emulated_instruction(&svm->vcpu);
 	kvm_emulate_hypercall(&svm->vcpu);
 	return 1;
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 14c1a18..b7dcd3c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4995,7 +4995,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 		if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
 			if (vcpu->arch.halt_request) {
 				vcpu->arch.halt_request = 0;
-				return kvm_emulate_halt(vcpu);
+				return kvm_emulate_halt_noskip(vcpu);
 			}
 			return 1;
 		}
@@ -5522,13 +5522,11 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
 
 static int handle_halt(struct kvm_vcpu *vcpu)
 {
-	skip_emulated_instruction(vcpu);
 	return kvm_emulate_halt(vcpu);
 }
 
 static int handle_vmcall(struct kvm_vcpu *vcpu)
 {
-	skip_emulated_instruction(vcpu);
 	kvm_emulate_hypercall(vcpu);
 	return 1;
 }
@@ -5559,7 +5557,6 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)
 
 static int handle_wbinvd(struct kvm_vcpu *vcpu)
 {
-	skip_emulated_instruction(vcpu);
 	kvm_emulate_wbinvd(vcpu);
 	return 1;
 }
@@ -5898,7 +5895,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 
 		if (vcpu->arch.halt_request) {
 			vcpu->arch.halt_request = 0;
-			ret = kvm_emulate_halt(vcpu);
+			ret = kvm_emulate_halt_noskip(vcpu);
 			goto out;
 		}
 
@@ -9513,7 +9510,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	vmcs12->launch_state = 1;
 
 	if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
-		return kvm_emulate_halt(vcpu);
+		return kvm_emulate_halt_noskip(vcpu);
 
 	vmx->nested.nested_run_pending = 1;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bd7a70b..96a8333 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4706,7 +4706,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
 	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
 }
 
-int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 {
 	if (!need_emulate_wbinvd(vcpu))
 		return X86EMUL_CONTINUE;
@@ -4723,11 +4723,19 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 		wbinvd();
 	return X86EMUL_CONTINUE;
 }
+
+int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	return kvm_emulate_wbinvd_noskip(vcpu);
+}
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
 
+
+
 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
 {
-	kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
+	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
 }
 
 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
@@ -5817,7 +5825,7 @@ void kvm_arch_exit(void)
 	free_percpu(shared_msrs);
 }
 
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.halt_exits;
 	if (irqchip_in_kernel(vcpu->kvm)) {
@@ -5828,6 +5836,13 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	return kvm_emulate_halt_noskip(vcpu);
+}
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
@@ -5912,6 +5927,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	unsigned long nr, a0, a1, a2, a3, ret;
 	int op_64_bit, r = 1;
 
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+
 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
 		return kvm_hv_hypercall(vcpu);
 


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v2 2/2] x86: svm: make wbinvd faster
  2015-03-02 18:04 [PATCH v2 0/2] Series short description Joel Schopp
  2015-03-02 18:04 ` [PATCH v2 1/2] kvm: x86: make kvm_emulate_* consistant Joel Schopp
@ 2015-03-02 18:04 ` Joel Schopp
  2015-03-02 19:05   ` Radim Krčmář
  1 sibling, 1 reply; 6+ messages in thread
From: Joel Schopp @ 2015-03-02 18:04 UTC (permalink / raw)
  To: Gleb Natapov, Paolo Bonzini, kvm
  Cc: David Kaplan, David Kaplan, rkrcmar, Joerg Roedel, linux-kernel,
	Borislav Petkov

From: David Kaplan <David.Kaplan@amd.com>
No need to re-decode WBINVD since we know what it is from the intercept.

Signed-off-by: David Kaplan <David.Kaplan@amd.com>
[extracted from larger unrelated patch, forward ported, tested]
Signed-off-by: Joel Schopp <joel.schopp@amd.com>
---
 arch/x86/kvm/svm.c |    9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0c9e377..794bca7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2774,6 +2774,13 @@ static int skinit_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
+static int wbinvd_interception(struct vcpu_svm *svm)
+{
+	kvm_emulate_wbinvd(&svm->vcpu);
+	return 1;
+}
+
+
 static int xsetbv_interception(struct vcpu_svm *svm)
 {
 	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
@@ -3374,7 +3381,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_STGI]				= stgi_interception,
 	[SVM_EXIT_CLGI]				= clgi_interception,
 	[SVM_EXIT_SKINIT]			= skinit_interception,
-	[SVM_EXIT_WBINVD]                       = emulate_on_interception,
+	[SVM_EXIT_WBINVD]                       = wbinvd_interception,
 	[SVM_EXIT_MONITOR]			= monitor_interception,
 	[SVM_EXIT_MWAIT]			= mwait_interception,
 	[SVM_EXIT_XSETBV]			= xsetbv_interception,


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 1/2] kvm: x86: make kvm_emulate_* consistant
  2015-03-02 18:04 ` [PATCH v2 1/2] kvm: x86: make kvm_emulate_* consistant Joel Schopp
@ 2015-03-02 19:04   ` Radim Krčmář
  2015-03-02 19:20     ` Joel Schopp
  0 siblings, 1 reply; 6+ messages in thread
From: Radim Krčmář @ 2015-03-02 19:04 UTC (permalink / raw)
  To: Joel Schopp
  Cc: Gleb Natapov, Paolo Bonzini, kvm, Joerg Roedel, Borislav Petkov,
	linux-kernel, David Kaplan

2015-03-02 12:04-0600, Joel Schopp:
> Currently kvm_emulate() skips the instruction but kvm_emulate_* sometimes
> don't.  The end result is the caller ends up doing the skip themselves. 
> Let's make them consistent.
> 
> Signed-off-by: Joel Schopp <joel.schopp@amd.com>
> ---
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> @@ -4995,7 +4995,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
>  		if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
>  			if (vcpu->arch.halt_request) {
>  				vcpu->arch.halt_request = 0;
> -				return kvm_emulate_halt(vcpu);
> +				return kvm_emulate_halt_noskip(vcpu);

noskip is used without being declared ... it shouldn't compile.

*_noskip makes the usual case harder to understand: we just want to halt
the vcpu, so name it more directly ... like kvm_vcpu_halt()?

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 2/2] x86: svm: make wbinvd faster
  2015-03-02 18:04 ` [PATCH v2 2/2] x86: svm: make wbinvd faster Joel Schopp
@ 2015-03-02 19:05   ` Radim Krčmář
  0 siblings, 0 replies; 6+ messages in thread
From: Radim Krčmář @ 2015-03-02 19:05 UTC (permalink / raw)
  To: Joel Schopp
  Cc: Gleb Natapov, Paolo Bonzini, kvm, David Kaplan, Joerg Roedel,
	linux-kernel, Borislav Petkov

2015-03-02 12:04-0600, Joel Schopp:
> From: David Kaplan <David.Kaplan@amd.com>
> No need to re-decode WBINVD since we know what it is from the intercept.
> 
> Signed-off-by: David Kaplan <David.Kaplan@amd.com>
> [extracted from larger unrelated patch, forward ported, tested]
> Signed-off-by: Joel Schopp <joel.schopp@amd.com>
> ---

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>

> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> @@ -2774,6 +2774,13 @@ static int skinit_interception(struct vcpu_svm *svm)
>  	return 1;
>  }
>  
> +static int wbinvd_interception(struct vcpu_svm *svm)
> +{
> +	kvm_emulate_wbinvd(&svm->vcpu);
> +	return 1;
> +}
> +
> +

(Squashing these lines would have been a nice improvement.)

>  static int xsetbv_interception(struct vcpu_svm *svm)
>  {
>  	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 1/2] kvm: x86: make kvm_emulate_* consistant
  2015-03-02 19:04   ` Radim Krčmář
@ 2015-03-02 19:20     ` Joel Schopp
  0 siblings, 0 replies; 6+ messages in thread
From: Joel Schopp @ 2015-03-02 19:20 UTC (permalink / raw)
  To: Radim Krčmář
  Cc: Gleb Natapov, Paolo Bonzini, kvm, Joerg Roedel, Borislav Petkov,
	linux-kernel, David Kaplan


>> ---
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> @@ -4995,7 +4995,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
>>   		if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
>>   			if (vcpu->arch.halt_request) {
>>   				vcpu->arch.halt_request = 0;
>> -				return kvm_emulate_halt(vcpu);
>> +				return kvm_emulate_halt_noskip(vcpu);
> noskip is used without being declared ... it shouldn't compile.
I tested on AMD hardware, I thought I had turned on the Intel KVM module 
as well, but it turns out I hadn't.  Will fix in v3.

> *_noskip makes the usual case harder to understand: we just want to halt
> the vcpu, so name it more directly ... like kvm_vcpu_halt()?
I like that better.  Will make the change in v3.


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2015-03-02 19:21 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2015-03-02 18:04 [PATCH v2 0/2] Series short description Joel Schopp
2015-03-02 18:04 ` [PATCH v2 1/2] kvm: x86: make kvm_emulate_* consistant Joel Schopp
2015-03-02 19:04   ` Radim Krčmář
2015-03-02 19:20     ` Joel Schopp
2015-03-02 18:04 ` [PATCH v2 2/2] x86: svm: make wbinvd faster Joel Schopp
2015-03-02 19:05   ` Radim Krčmář

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).