public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler
  2010-12-07 10:59 [PATCH 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
@ 2010-12-07 10:59 ` Andre Przywara
  2010-12-07 13:24   ` Avi Kivity
  0 siblings, 1 reply; 17+ messages in thread
From: Andre Przywara @ 2010-12-07 10:59 UTC (permalink / raw)
  To: avi; +Cc: kvm, mtosatti, Andre Przywara

Newer SVM implementations provide the GPR number in the VMCB, so
that the emulation path is no longer necessary to handle CR
register access intercepts. Implement the handling in svm.c and
use it when the info is provided.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
 arch/x86/include/asm/svm.h |    2 +
 arch/x86/kvm/svm.c         |   74 +++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 68 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 11dbca7..589fc25 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -256,6 +256,8 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
 #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
 
+#define SVM_EXITINFO_REG_MASK 0x0F
+
 #define	SVM_EXIT_READ_CR0 	0x000
 #define	SVM_EXIT_READ_CR3 	0x003
 #define	SVM_EXIT_READ_CR4 	0x004
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c573e2d..b7233fd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2594,12 +2594,70 @@ static int emulate_on_interception(struct vcpu_svm *svm)
 	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
 }
 
+static int cr_interception(struct vcpu_svm *svm)
+{
+	int reg, cr;
+	unsigned long val;
+
+	if (!boot_cpu_has(SVM_FEATURE_DECODE_ASSIST) ||
+	    (svm->vmcb->control.exit_info_1 & (1ULL << 63)) == 0)
+		return emulate_on_interception(svm);
+
+	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
+	if (cr > 15) { /* mov to cr */
+		val = kvm_register_read(&svm->vcpu, reg);
+		switch (cr & 0x0F) {
+		case 0:
+			kvm_set_cr0(&svm->vcpu, val);
+			break;
+		case 3:
+			kvm_set_cr3(&svm->vcpu, val);
+			break;
+		case 4:
+			kvm_set_cr4(&svm->vcpu, val);
+			break;
+		case 8:
+			kvm_set_cr8(&svm->vcpu, val & 0xfUL);
+			break;
+		default:
+			WARN(1, "unhandled write to CR%d", cr & 0x0F);
+			return EMULATE_FAIL;
+		}
+	} else { /* mov from cr */
+		switch (cr & 0x0F) {
+		case 0:
+			val = kvm_read_cr0(&svm->vcpu);
+			break;
+		case 2:
+			val = svm->vcpu.arch.cr2;
+			break;
+		case 3:
+			val = svm->vcpu.arch.cr3;
+			break;
+		case 4:
+			val = kvm_read_cr4(&svm->vcpu);
+			break;
+		case 8:
+			val = kvm_get_cr8(&svm->vcpu);
+			break;
+		default:
+			WARN(1, "unhandled read from CR%d", cr & 0x0F);
+			return EMULATE_FAIL;
+		}
+		kvm_register_write(&svm->vcpu, reg, val);
+	}
+	skip_emulated_instruction(&svm->vcpu);
+
+	return 1;
+}
+
 static int cr0_write_interception(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	int r;
 
-	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+	r = cr_interception(svm);
 
 	if (svm->nested.vmexit_rip) {
 		kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
@@ -2617,7 +2675,7 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
-	emulate_instruction(&svm->vcpu, 0, 0, 0);
+	cr_interception(svm);
 	if (irqchip_in_kernel(svm->vcpu.kvm)) {
 		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 		return 1;
@@ -2864,14 +2922,14 @@ static int pause_interception(struct vcpu_svm *svm)
 }
 
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
-	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
+	[SVM_EXIT_READ_CR0]			= cr_interception,
+	[SVM_EXIT_READ_CR3]			= cr_interception,
+	[SVM_EXIT_READ_CR4]			= cr_interception,
+	[SVM_EXIT_READ_CR8]			= cr_interception,
 	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
-	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
+	[SVM_EXIT_WRITE_CR3]			= cr_interception,
+	[SVM_EXIT_WRITE_CR4]			= cr_interception,
 	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
 	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
 	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
-- 
1.6.4



^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler
  2010-12-07 10:59 ` [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler Andre Przywara
@ 2010-12-07 13:24   ` Avi Kivity
  2010-12-07 14:30     ` Andre Przywara
  0 siblings, 1 reply; 17+ messages in thread
From: Avi Kivity @ 2010-12-07 13:24 UTC (permalink / raw)
  To: Andre Przywara; +Cc: kvm, mtosatti

On 12/07/2010 12:59 PM, Andre Przywara wrote:
> Newer SVM implementations provide the GPR number in the VMCB, so
> that the emulation path is no longer necesarry to handle CR
> register access intercepts. Implement the handling in svm.c and
> use it when the info is provided.
>
> Signed-off-by: Andre Przywara<andre.przywara@amd.com>
> ---
>   arch/x86/include/asm/svm.h |    2 +
>   arch/x86/kvm/svm.c         |   74 +++++++++++++++++++++++++++++++++++++++-----
>   2 files changed, 68 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> index 11dbca7..589fc25 100644
> --- a/arch/x86/include/asm/svm.h
> +++ b/arch/x86/include/asm/svm.h
> @@ -256,6 +256,8 @@ struct __attribute__ ((__packed__)) vmcb {
>   #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
>   #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
>
> +#define SVM_EXITINFO_REG_MASK 0x0F
> +
>   #define	SVM_EXIT_READ_CR0 	0x000
>   #define	SVM_EXIT_READ_CR3 	0x003
>   #define	SVM_EXIT_READ_CR4 	0x004
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index c573e2d..b7233fd 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -2594,12 +2594,70 @@ static int emulate_on_interception(struct vcpu_svm *svm)
>   	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
>   }
>
> +static int cr_interception(struct vcpu_svm *svm)
> +{
> +	int reg, cr;
> +	unsigned long val;
> +
> +	if (!boot_cpu_has(SVM_FEATURE_DECODE_ASSIST) ||
> +	    (svm->vmcb->control.exit_info_1&  (1ULL<<  63)) == 0)
> +		return emulate_on_interception(svm);
> +
> +	reg = svm->vmcb->control.exit_info_1&  SVM_EXITINFO_REG_MASK;
> +	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
> +	if (cr>  15) { /* mov to cr */
> +		val = kvm_register_read(&svm->vcpu, reg);
> +		switch (cr&  0x0F) {
> +		case 0:
> +			kvm_set_cr0(&svm->vcpu, val);
> +			break;
> +		case 3:
> +			kvm_set_cr3(&svm->vcpu, val);
> +			break;
> +		case 4:
> +			kvm_set_cr4(&svm->vcpu, val);
> +			break;
> +		case 8:
> +			kvm_set_cr8(&svm->vcpu, val&  0xfUL);
> +			break;
> +		default:
> +			WARN(1, "unhandled write to CR%d", cr&  0x0F);
> +			return EMULATE_FAIL;
> +		}
>

...

> +	}
> +	skip_emulated_instruction(&svm->vcpu);
> +
> +	return 1;
> +}
> +

Must not skip_emulated_instruction() if we inject an exception; see how 
vmx does this (handle_cr).

> @@ -2864,14 +2922,14 @@ static int pause_interception(struct vcpu_svm *svm)
>   }
>
>   static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
> -	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
> -	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
> -	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
> -	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
> +	[SVM_EXIT_READ_CR0]			= cr_interception,
> +	[SVM_EXIT_READ_CR3]			= cr_interception,
> +	[SVM_EXIT_READ_CR4]			= cr_interception,
> +	[SVM_EXIT_READ_CR8]			= cr_interception,
>   	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
>   	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
> -	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
> -	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
> +	[SVM_EXIT_WRITE_CR3]			= cr_interception,
> +	[SVM_EXIT_WRITE_CR4]			= cr_interception,
>   	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,

We could move cr[08]_write_interception into cr_interception, but that 
takes a bit more thought.  Best done later.

>   	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
>   	[SVM_EXIT_READ_DR1]			= emulate_on_interception,


-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler
  2010-12-07 13:24   ` Avi Kivity
@ 2010-12-07 14:30     ` Andre Przywara
  2010-12-07 14:41       ` Avi Kivity
  0 siblings, 1 reply; 17+ messages in thread
From: Andre Przywara @ 2010-12-07 14:30 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm@vger.kernel.org, mtosatti@redhat.com

Avi Kivity wrote:
> On 12/07/2010 12:59 PM, Andre Przywara wrote:
>> Newer SVM implementations provide the GPR number in the VMCB, so
>> that the emulation path is no longer necesarry to handle CR
>> register access intercepts. Implement the handling in svm.c and
>> use it when the info is provided.
>>
>> Signed-off-by: Andre Przywara<andre.przywara@amd.com>
>> ---
>>   arch/x86/include/asm/svm.h |    2 +
>>   arch/x86/kvm/svm.c         |   74 +++++++++++++++++++++++++++++++++++++++-----
>>   2 files changed, 68 insertions(+), 8 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
>> index 11dbca7..589fc25 100644
>> --- a/arch/x86/include/asm/svm.h
>> +++ b/arch/x86/include/asm/svm.h
>> ...
>> @@ -2864,14 +2922,14 @@ static int pause_interception(struct vcpu_svm *svm)
>>   }
>>
>>   static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
>> -	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
>> -	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
>> -	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
>> -	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
>> +	[SVM_EXIT_READ_CR0]			= cr_interception,
>> +	[SVM_EXIT_READ_CR3]			= cr_interception,
>> +	[SVM_EXIT_READ_CR4]			= cr_interception,
>> +	[SVM_EXIT_READ_CR8]			= cr_interception,
>>   	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
>>   	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
>> -	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
>> -	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
>> +	[SVM_EXIT_WRITE_CR3]			= cr_interception,
>> +	[SVM_EXIT_WRITE_CR4]			= cr_interception,
>>   	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
> 
> We could move cr[08]_write_interception into cr_interception, but that 
> takes a bit more thought.  Best done later.
Yes, I thought about that, too. But since we still have to deal with the 
emulation code path, it would not make the code easier. But if we 
overwrite the svm_exit_handlers[] on detecting the SVM feature, this 
could make more sense. I will check this out.

Thanks for the review, I will address your comments.

Regards,
Andre.




^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler
  2010-12-07 14:30     ` Andre Przywara
@ 2010-12-07 14:41       ` Avi Kivity
  0 siblings, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2010-12-07 14:41 UTC (permalink / raw)
  To: Andre Przywara; +Cc: kvm@vger.kernel.org, mtosatti@redhat.com

On 12/07/2010 04:30 PM, Andre Przywara wrote:
> Avi Kivity wrote:
>> On 12/07/2010 12:59 PM, Andre Przywara wrote:
>>> Newer SVM implementations provide the GPR number in the VMCB, so
>>> that the emulation path is no longer necesarry to handle CR
>>> register access intercepts. Implement the handling in svm.c and
>>> use it when the info is provided.
>>>
>>> Signed-off-by: Andre Przywara<andre.przywara@amd.com>
>>> ---
>>>   arch/x86/include/asm/svm.h |    2 +
>>>   arch/x86/kvm/svm.c         |   74 
>>> +++++++++++++++++++++++++++++++++++++++-----
>>>   2 files changed, 68 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
>>> index 11dbca7..589fc25 100644
>>> --- a/arch/x86/include/asm/svm.h
>>> +++ b/arch/x86/include/asm/svm.h
>>> ...
>>> @@ -2864,14 +2922,14 @@ static int pause_interception(struct 
>>> vcpu_svm *svm)
>>>   }
>>>
>>>   static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
>>> -    [SVM_EXIT_READ_CR0]            = emulate_on_interception,
>>> -    [SVM_EXIT_READ_CR3]            = emulate_on_interception,
>>> -    [SVM_EXIT_READ_CR4]            = emulate_on_interception,
>>> -    [SVM_EXIT_READ_CR8]            = emulate_on_interception,
>>> +    [SVM_EXIT_READ_CR0]            = cr_interception,
>>> +    [SVM_EXIT_READ_CR3]            = cr_interception,
>>> +    [SVM_EXIT_READ_CR4]            = cr_interception,
>>> +    [SVM_EXIT_READ_CR8]            = cr_interception,
>>>       [SVM_EXIT_CR0_SEL_WRITE]        = emulate_on_interception,
>>>       [SVM_EXIT_WRITE_CR0]            = cr0_write_interception,
>>> -    [SVM_EXIT_WRITE_CR3]            = emulate_on_interception,
>>> -    [SVM_EXIT_WRITE_CR4]            = emulate_on_interception,
>>> +    [SVM_EXIT_WRITE_CR3]            = cr_interception,
>>> +    [SVM_EXIT_WRITE_CR4]            = cr_interception,
>>>       [SVM_EXIT_WRITE_CR8]            = cr8_write_interception,
>>
>> We could move cr[08]_write_interception into cr_interception, but 
>> that takes a bit more thought.  Best done later.
> Yes, I thought about that, too. But since we still have to deal with 
> the emulation code path, it would not make the code easier. But if we 
> overwrite the svm_exit_handlers[] on detecting the SVM feature, this 
> could make more sense. I will check this out.
>

In fact the correct thing is to make sure kvm_set_cr0() and 
kvm_set_cr8() do the right thing, so that the emulation path invoked 
directly (not through a cr intercept) works.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 17+ messages in thread

* [PATCH -v2 0/5] kvm/svm: implement new DecodeAssist features
@ 2010-12-10 13:51 Andre Przywara
  2010-12-10 13:51 ` [PATCH 1/5] kvm/svm: add new SVM feature bit names Andre Przywara
                   ` (4 more replies)
  0 siblings, 5 replies; 17+ messages in thread
From: Andre Przywara @ 2010-12-10 13:51 UTC (permalink / raw)
  To: avi; +Cc: mtosatti, kvm

Hi,

version 2 of the DecodeAssist patches.
Changes over version 1:
- goes on top of the CR8 handling fix I sent out earlier this week
  (required for proper handling of CR8 exceptions)
- handles exception cases properly (for mov cr and mov dr)
- uses X86_FEATURE_ names instead of SVM_FEATURE names (for boot_cpu_has)
  (thanks to Joerg for spotting this)
- use static_cpu_has where appropriate
- some minor code cleanups (for instance cr register calculation)
- move prefetch callback into x86_decode_insn and out of every fetch
  I refrained from ditching the callback at all, as I don't like extending
  every emulate_instruction call with "NULL, 0". But if this is
  desperately needed, I can still change it.
- rename vendor specific prefetch function names


Upcoming AMD CPUs will have an SVM enhancement called DecodeAssist
which will provide more information when intercepting certain events.
This information allows skipping the instruction fetching and
decoding and handling the intercept immediately.
This patch set implements all the features which are documented
in the recent AMD manual (APM vol. 2). For details see the patches.

Please review and apply.

Regards,
Andre.



^ permalink raw reply	[flat|nested] 17+ messages in thread

* [PATCH 1/5] kvm/svm: add new SVM feature bit names
  2010-12-10 13:51 [PATCH -v2 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
@ 2010-12-10 13:51 ` Andre Przywara
  2010-12-10 13:51 ` [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler Andre Przywara
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 17+ messages in thread
From: Andre Przywara @ 2010-12-10 13:51 UTC (permalink / raw)
  To: avi; +Cc: mtosatti, kvm, Andre Przywara

The recent APM Vol. 2 and the recent AMD CPUID specification describe
new CPUID feature bits for SVM. Name them here for later usage.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
 arch/x86/kvm/svm.c |    4 ++++
 1 files changed, 4 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ed5950c..298ff79 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -51,6 +51,10 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_LBRV           (1 <<  1)
 #define SVM_FEATURE_SVML           (1 <<  2)
 #define SVM_FEATURE_NRIP           (1 <<  3)
+#define SVM_FEATURE_TSC_RATE       (1 <<  4)
+#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
+#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
+#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
 #define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
 
 #define NESTED_EXIT_HOST	0	/* Exit handled on host level */
-- 
1.6.4



^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler
  2010-12-10 13:51 [PATCH -v2 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
  2010-12-10 13:51 ` [PATCH 1/5] kvm/svm: add new SVM feature bit names Andre Przywara
@ 2010-12-10 13:51 ` Andre Przywara
  2010-12-13 12:13   ` Avi Kivity
  2010-12-20 11:56   ` Marcelo Tosatti
  2010-12-10 13:51 ` [PATCH 3/5] kvm/svm: enhance mov DR " Andre Przywara
                   ` (2 subsequent siblings)
  4 siblings, 2 replies; 17+ messages in thread
From: Andre Przywara @ 2010-12-10 13:51 UTC (permalink / raw)
  To: avi; +Cc: mtosatti, kvm, Andre Przywara

Newer SVM implementations provide the GPR number in the VMCB, so
that the emulation path is no longer necessary to handle CR
register access intercepts. Implement the handling in svm.c and
use it when the info is provided.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
 arch/x86/include/asm/svm.h |    2 +
 arch/x86/kvm/svm.c         |   91 ++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 82 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 11dbca7..589fc25 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -256,6 +256,8 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
 #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
 
+#define SVM_EXITINFO_REG_MASK 0x0F
+
 #define	SVM_EXIT_READ_CR0 	0x000
 #define	SVM_EXIT_READ_CR3 	0x003
 #define	SVM_EXIT_READ_CR4 	0x004
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 298ff79..ee5f100 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2594,12 +2594,81 @@ static int emulate_on_interception(struct vcpu_svm *svm)
 	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
 }
 
+static int cr_interception(struct vcpu_svm *svm)
+{
+	int reg, cr;
+	unsigned long val;
+	int err;
+
+	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+		return emulate_on_interception(svm);
+
+	/* bit 63 is the valid bit, as not all instructions (like lmsw)
+	   provide the information */
+	if (unlikely((svm->vmcb->control.exit_info_1 & (1ULL << 63)) == 0))
+		return emulate_on_interception(svm);
+
+	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
+
+	err = 0;
+	if (cr >= 16) { /* mov to cr */
+		cr -= 16;
+		val = kvm_register_read(&svm->vcpu, reg);
+		switch (cr) {
+		case 0:
+			err = kvm_set_cr0(&svm->vcpu, val);
+			break;
+		case 3:
+			err = kvm_set_cr3(&svm->vcpu, val);
+			break;
+		case 4:
+			err = kvm_set_cr4(&svm->vcpu, val);
+			break;
+		case 8:
+			err = kvm_set_cr8(&svm->vcpu, val);
+			break;
+		default:
+			WARN(1, "unhandled write to CR%d", cr);
+			return EMULATE_FAIL;
+		}
+	} else { /* mov from cr */
+		switch (cr) {
+		case 0:
+			val = kvm_read_cr0(&svm->vcpu);
+			break;
+		case 2:
+			val = svm->vcpu.arch.cr2;
+			break;
+		case 3:
+			val = svm->vcpu.arch.cr3;
+			break;
+		case 4:
+			val = kvm_read_cr4(&svm->vcpu);
+			break;
+		case 8:
+			val = kvm_get_cr8(&svm->vcpu);
+			break;
+		default:
+			WARN(1, "unhandled read from CR%d", cr);
+			return EMULATE_FAIL;
+		}
+		kvm_register_write(&svm->vcpu, reg, val);
+	}
+	if (!err)
+		skip_emulated_instruction(&svm->vcpu);
+	else
+		kvm_inject_gp(&svm->vcpu, 0);
+
+	return 1;
+}
+
 static int cr0_write_interception(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	int r;
 
-	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+	r = cr_interception(svm);
 
 	if (svm->nested.vmexit_rip) {
 		kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
@@ -2608,7 +2677,7 @@ static int cr0_write_interception(struct vcpu_svm *svm)
 		svm->nested.vmexit_rip = 0;
 	}
 
-	return r == EMULATE_DONE;
+	return r;
 }
 
 static int cr8_write_interception(struct vcpu_svm *svm)
@@ -2618,13 +2687,13 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
-	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+	r = cr_interception(svm);
 	if (irqchip_in_kernel(svm->vcpu.kvm)) {
 		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
-		return r == EMULATE_DONE;
+		return r;
 	}
 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
-		return r == EMULATE_DONE;
+		return r;
 	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
 	return 0;
 }
@@ -2865,14 +2934,14 @@ static int pause_interception(struct vcpu_svm *svm)
 }
 
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
-	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
+	[SVM_EXIT_READ_CR0]			= cr_interception,
+	[SVM_EXIT_READ_CR3]			= cr_interception,
+	[SVM_EXIT_READ_CR4]			= cr_interception,
+	[SVM_EXIT_READ_CR8]			= cr_interception,
 	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
-	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
+	[SVM_EXIT_WRITE_CR3]			= cr_interception,
+	[SVM_EXIT_WRITE_CR4]			= cr_interception,
 	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
 	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
 	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
-- 
1.6.4



^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 3/5] kvm/svm: enhance mov DR intercept handler
  2010-12-10 13:51 [PATCH -v2 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
  2010-12-10 13:51 ` [PATCH 1/5] kvm/svm: add new SVM feature bit names Andre Przywara
  2010-12-10 13:51 ` [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler Andre Przywara
@ 2010-12-10 13:51 ` Andre Przywara
  2010-12-13 12:14   ` Avi Kivity
  2010-12-10 13:51 ` [PATCH 4/5] kvm/svm: implement enhanced INVLPG intercept Andre Przywara
  2010-12-10 13:51 ` [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB Andre Przywara
  4 siblings, 1 reply; 17+ messages in thread
From: Andre Przywara @ 2010-12-10 13:51 UTC (permalink / raw)
  To: avi; +Cc: mtosatti, kvm, Andre Przywara

Newer SVM implementations provide the GPR number in the VMCB, so
that the emulation path is no longer necessary to handle debug
register access intercepts. Implement the handling in svm.c and
use it when the info is provided.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
 arch/x86/kvm/svm.c |   61 ++++++++++++++++++++++++++++++++++++++-------------
 1 files changed, 45 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ee5f100..ecb4acf 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2680,6 +2680,35 @@ static int cr0_write_interception(struct vcpu_svm *svm)
 	return r;
 }
 
+static int dr_interception(struct vcpu_svm *svm)
+{
+	int reg, dr;
+	unsigned long val;
+	int err;
+
+	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
+		return emulate_on_interception(svm);
+
+	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
+
+	if (dr >= 16) { /* mov to DRn */
+		val = kvm_register_read(&svm->vcpu, reg);
+		err = kvm_set_dr(&svm->vcpu, dr - 16, val);
+	} else {
+		err = kvm_get_dr(&svm->vcpu, dr, &val);
+		if (!err)
+			kvm_register_write(&svm->vcpu, reg, val);
+	}
+
+	if (!err)
+		skip_emulated_instruction(&svm->vcpu);
+	else
+		kvm_inject_gp(&svm->vcpu, 0);
+
+	return 1;
+}
+
 static int cr8_write_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
@@ -2943,22 +2972,22 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_WRITE_CR3]			= cr_interception,
 	[SVM_EXIT_WRITE_CR4]			= cr_interception,
 	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
-	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR4]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR5]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR6]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR7]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR4]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR6]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
+	[SVM_EXIT_READ_DR0]			= dr_interception,
+	[SVM_EXIT_READ_DR1]			= dr_interception,
+	[SVM_EXIT_READ_DR2]			= dr_interception,
+	[SVM_EXIT_READ_DR3]			= dr_interception,
+	[SVM_EXIT_READ_DR4]			= dr_interception,
+	[SVM_EXIT_READ_DR5]			= dr_interception,
+	[SVM_EXIT_READ_DR6]			= dr_interception,
+	[SVM_EXIT_READ_DR7]			= dr_interception,
+	[SVM_EXIT_WRITE_DR0]			= dr_interception,
+	[SVM_EXIT_WRITE_DR1]			= dr_interception,
+	[SVM_EXIT_WRITE_DR2]			= dr_interception,
+	[SVM_EXIT_WRITE_DR3]			= dr_interception,
+	[SVM_EXIT_WRITE_DR4]			= dr_interception,
+	[SVM_EXIT_WRITE_DR5]			= dr_interception,
+	[SVM_EXIT_WRITE_DR6]			= dr_interception,
+	[SVM_EXIT_WRITE_DR7]			= dr_interception,
 	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
 	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
 	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
-- 
1.6.4



^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 4/5] kvm/svm: implement enhanced INVLPG intercept
  2010-12-10 13:51 [PATCH -v2 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
                   ` (2 preceding siblings ...)
  2010-12-10 13:51 ` [PATCH 3/5] kvm/svm: enhance mov DR " Andre Przywara
@ 2010-12-10 13:51 ` Andre Przywara
  2010-12-10 13:51 ` [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB Andre Przywara
  4 siblings, 0 replies; 17+ messages in thread
From: Andre Przywara @ 2010-12-10 13:51 UTC (permalink / raw)
  To: avi; +Cc: mtosatti, kvm, Andre Przywara

When the DecodeAssist feature is available, the linear address
is provided in the VMCB on INVLPG intercepts. Use it directly to
avoid any decoding and emulation.
This is only useful for shadow paging, though.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
 arch/x86/kvm/svm.c |    7 ++++++-
 1 files changed, 6 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ecb4acf..73f1a6d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2586,7 +2586,12 @@ static int iret_interception(struct vcpu_svm *svm)
 
 static int invlpg_interception(struct vcpu_svm *svm)
 {
-	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+		return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+
+	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
+	skip_emulated_instruction(&svm->vcpu);
+	return 1;
 }
 
 static int emulate_on_interception(struct vcpu_svm *svm)
-- 
1.6.4



^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB
  2010-12-10 13:51 [PATCH -v2 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
                   ` (3 preceding siblings ...)
  2010-12-10 13:51 ` [PATCH 4/5] kvm/svm: implement enhanced INVLPG intercept Andre Przywara
@ 2010-12-10 13:51 ` Andre Przywara
  2010-12-13 12:24   ` Avi Kivity
  4 siblings, 1 reply; 17+ messages in thread
From: Andre Przywara @ 2010-12-10 13:51 UTC (permalink / raw)
  To: avi; +Cc: mtosatti, kvm, Andre Przywara

In case of a nested page fault or an intercepted #PF newer SVM
implementations provide a copy of the faulting instruction bytes
in the VMCB.
Use these bytes to feed the instruction emulator and avoid the costly
guest instruction fetch in this case.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
 arch/x86/include/asm/kvm_host.h |    3 +++
 arch/x86/include/asm/svm.h      |    4 +++-
 arch/x86/kvm/emulate.c          |    1 +
 arch/x86/kvm/svm.c              |   20 ++++++++++++++++++++
 arch/x86/kvm/vmx.c              |    7 +++++++
 5 files changed, 34 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2b89195..baaf063 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -586,6 +586,9 @@ struct kvm_x86_ops {
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+	int (*prefetch_instruction)(struct kvm_vcpu *vcpu);
+
 	const struct trace_print_flags *exit_reasons_str;
 };
 
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 589fc25..6d64b1d 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -81,7 +81,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u64 lbr_ctl;
 	u64 reserved_5;
 	u64 next_rip;
-	u8 reserved_6[816];
+	u8 insn_len;
+	u8 insn_bytes[15];
+	u8 reserved_6[800];
 };
 
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6366735..70385ee 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2623,6 +2623,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt)
 	c->eip = ctxt->eip;
 	c->fetch.start = c->fetch.end = c->eip;
 	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
+	kvm_x86_ops->prefetch_instruction(ctxt->vcpu);
 
 	switch (mode) {
 	case X86EMUL_MODE_REAL:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 73f1a6d..685b264 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -464,6 +464,24 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	svm_set_interrupt_shadow(vcpu, 0);
 }
 
+static int svm_prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	uint8_t len;
+	struct fetch_cache *fetch;
+
+	len = svm->vmcb->control.insn_len & 0x0F;
+	if (len == 0)
+		return 1;
+
+	fetch = &svm->vcpu.arch.emulate_ctxt.decode.fetch;
+	fetch->start = kvm_rip_read(&svm->vcpu);
+	fetch->end = fetch->start + len;
+	memcpy(fetch->data, svm->vmcb->control.insn_bytes, len);
+
+	return 0;
+}
+
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject)
@@ -3848,6 +3866,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
 
 	.set_tdp_cr3 = set_tdp_cr3,
+
+	.prefetch_instruction = svm_prefetch_instruction,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e5ef924..7572751 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1009,6 +1009,11 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	vmx_set_interrupt_shadow(vcpu, 0);
 }
 
+static int vmx_prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject)
@@ -4362,6 +4367,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
 
 	.set_tdp_cr3 = vmx_set_cr3,
+
+	.prefetch_instruction = vmx_prefetch_instruction,
 };
 
 static int __init vmx_init(void)
-- 
1.6.4



^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler
  2010-12-10 13:51 ` [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler Andre Przywara
@ 2010-12-13 12:13   ` Avi Kivity
  2010-12-20 11:56   ` Marcelo Tosatti
  1 sibling, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2010-12-13 12:13 UTC (permalink / raw)
  To: Andre Przywara; +Cc: mtosatti, kvm

On 12/10/2010 03:51 PM, Andre Przywara wrote:
> Newer SVM implementations provide the GPR number in the VMCB, so
> that the emulation path is no longer necessary to handle CR
> register access intercepts. Implement the handling in svm.c and
> use it when the info is provided.
>
> Signed-off-by: Andre Przywara<andre.przywara@amd.com>
> ---
>   arch/x86/include/asm/svm.h |    2 +
>   arch/x86/kvm/svm.c         |   91 ++++++++++++++++++++++++++++++++++++++-----
>   2 files changed, 82 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> index 11dbca7..589fc25 100644
> --- a/arch/x86/include/asm/svm.h
> +++ b/arch/x86/include/asm/svm.h
> @@ -256,6 +256,8 @@ struct __attribute__ ((__packed__)) vmcb {
>   #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
>   #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
>
> +#define SVM_EXITINFO_REG_MASK 0x0F
> +
>   #define	SVM_EXIT_READ_CR0 	0x000
>   #define	SVM_EXIT_READ_CR3 	0x003
>   #define	SVM_EXIT_READ_CR4 	0x004
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 298ff79..ee5f100 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -2594,12 +2594,81 @@ static int emulate_on_interception(struct vcpu_svm *svm)
>   	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
>   }
>
> +static int cr_interception(struct vcpu_svm *svm)
> +{
> +	int reg, cr;
> +	unsigned long val;
> +	int err;
> +
> +	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
> +		return emulate_on_interception(svm);
> +
> +	/* bit 63 is the valid bit, as not all instructions (like lmsw)
> +	   provide the information */

Please use kernel style comments:

  /*
   * text
   * text
   */

Even better, use a name for the bit, which will obviate the need for a 
comment.


-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 3/5] kvm/svm: enhance mov DR intercept handler
  2010-12-10 13:51 ` [PATCH 3/5] kvm/svm: enhance mov DR " Andre Przywara
@ 2010-12-13 12:14   ` Avi Kivity
  2010-12-16 12:07     ` Andre Przywara
  0 siblings, 1 reply; 17+ messages in thread
From: Avi Kivity @ 2010-12-13 12:14 UTC (permalink / raw)
  To: Andre Przywara; +Cc: mtosatti, kvm

On 12/10/2010 03:51 PM, Andre Przywara wrote:
> Newer SVM implementations provide the GPR number in the VMCB, so
> that the emulation path is no longer necessary to handle debug
> register access intercepts. Implement the handling in svm.c and
> use it when the info is provided.
>
> +
> +	if (!err)
> +		skip_emulated_instruction(&svm->vcpu);
> +	else
> +		kvm_inject_gp(&svm->vcpu, 0);
> +

This repeats, how about using complete_insn_gp()?

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB
  2010-12-10 13:51 ` [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB Andre Przywara
@ 2010-12-13 12:24   ` Avi Kivity
  0 siblings, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2010-12-13 12:24 UTC (permalink / raw)
  To: Andre Przywara; +Cc: mtosatti, kvm

On 12/10/2010 03:51 PM, Andre Przywara wrote:
> In case of a nested page fault or an intercepted #PF newer SVM
> implementations provide a copy of the faulting instruction bytes
> in the VMCB.
> Use these bytes to feed the instruction emulator and avoid the costly
> guest instruction fetch in this case.
>
>
>
> +static int svm_prefetch_instruction(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_svm *svm = to_svm(vcpu);
> +	uint8_t len;
> +	struct fetch_cache *fetch;
> +
> +	len = svm->vmcb->control.insn_len&  0x0F;
> +	if (len == 0)
> +		return 1;
> +
> +	fetch =&svm->vcpu.arch.emulate_ctxt.decode.fetch;
> +	fetch->start = kvm_rip_read(&svm->vcpu);
> +	fetch->end = fetch->start + len;
> +	memcpy(fetch->data, svm->vmcb->control.insn_bytes, len);
> +
> +	return 0;
> +}

This reaching in into the emulator internals from svm code is not very 
good.  It also assumes ->prefetch_instruction() is called immediately 
after an exit; this isn't true in vmx and at least was considered for 
svm (emulating multiple instructions during the nsvm vmexit sequence).

Alternatives are:
- add the insn data to emulate_instruction() and friends (my first 
suggestion)
- adding x86_decode_insn_init(), which initializes the decode cache, and 
x86_decode_insn_prefill_cache(), called only if we have the insn data

Another one: teach kvm_fetch_guest_virt() to check if addr/bytes 
intersects with csbase+rip/len; if so, use that instead of doing the 
page table dance.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 3/5] kvm/svm: enhance mov DR intercept handler
  2010-12-13 12:14   ` Avi Kivity
@ 2010-12-16 12:07     ` Andre Przywara
  2010-12-16 12:48       ` Avi Kivity
  0 siblings, 1 reply; 17+ messages in thread
From: Andre Przywara @ 2010-12-16 12:07 UTC (permalink / raw)
  To: Avi Kivity; +Cc: mtosatti@redhat.com, kvm@vger.kernel.org

Avi Kivity wrote:
> On 12/10/2010 03:51 PM, Andre Przywara wrote:
>> Newer SVM implementations provide the GPR number in the VMCB, so
>> that the emulation path is no longer necessary to handle debug
>> register access intercepts. Implement the handling in svm.c and
>> use it when the info is provided.
>>
>> +
>> +	if (!err)
>> +		skip_emulated_instruction(&svm->vcpu);
>> +	else
>> +		kvm_inject_gp(&svm->vcpu, 0);
>> +
> 
> This repeats, how about using complete_insn_gp()?
Do you want this to be in x86.c? We could use 
kvm_x86_ops->skip_emulated_instruction.
Also shall I look for more similarities in the [CD]R intercept handling 
code between VMX and SVM? The switch responsible for actually reading 
and writing the regs is very similar. I could also try to merge this 
with the functionality in the emulator (if that works, haven't checked)

Regards,
Andre.

-- 
Andre Przywara
AMD-Operating System Research Center (OSRC), Dresden, Germany
Tel: +49 351 448-3567-12


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 3/5] kvm/svm: enhance mov DR intercept handler
  2010-12-16 12:07     ` Andre Przywara
@ 2010-12-16 12:48       ` Avi Kivity
  0 siblings, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2010-12-16 12:48 UTC (permalink / raw)
  To: Andre Przywara; +Cc: mtosatti@redhat.com, kvm@vger.kernel.org

On 12/16/2010 02:07 PM, Andre Przywara wrote:
> Avi Kivity wrote:
>> On 12/10/2010 03:51 PM, Andre Przywara wrote:
>>> Newer SVM implementations provide the GPR number in the VMCB, so
>>> that the emulation path is no longer necessary to handle debug
>>> register access intercepts. Implement the handling in svm.c and
>>> use it when the info is provided.
>>>
>>> +
>>> +    if (!err)
>>> +        skip_emulated_instruction(&svm->vcpu);
>>> +    else
>>> +        kvm_inject_gp(&svm->vcpu, 0);
>>> +
>>
>> This repeats, how about using complete_insn_gp()?
> Do you want this to be in x86.c?

Yes.

> We could use kvm_x86_ops->skip_emulated_instruction.
> Also shall I look for more similarities in the [CD]R intercept 
> handling code between VMX and SVM? The switch responsible for actually 
> reading and writing the regs is very similar. I could also try to 
> merge this with the functionality in the emulator (if that works, 
> haven't checked)
>

Let's get DecodeAssist merged first, and do more unification afterwards.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler
  2010-12-10 13:51 ` [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler Andre Przywara
  2010-12-13 12:13   ` Avi Kivity
@ 2010-12-20 11:56   ` Marcelo Tosatti
  2010-12-20 13:20     ` Andre Przywara
  1 sibling, 1 reply; 17+ messages in thread
From: Marcelo Tosatti @ 2010-12-20 11:56 UTC (permalink / raw)
  To: Andre Przywara; +Cc: avi, kvm

On Fri, Dec 10, 2010 at 02:51:25PM +0100, Andre Przywara wrote:
> Newer SVM implementations provide the GPR number in the VMCB, so
> that the emulation path is no longer necessary to handle CR
> register access intercepts. Implement the handling in svm.c and
> use it when the info is provided.
> 
> Signed-off-by: Andre Przywara <andre.przywara@amd.com>
> ---
>  arch/x86/include/asm/svm.h |    2 +
>  arch/x86/kvm/svm.c         |   91 ++++++++++++++++++++++++++++++++++++++-----
>  2 files changed, 82 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> index 11dbca7..589fc25 100644
> --- a/arch/x86/include/asm/svm.h
> +++ b/arch/x86/include/asm/svm.h
> @@ -256,6 +256,8 @@ struct __attribute__ ((__packed__)) vmcb {
>  #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
>  #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
>  
> +#define SVM_EXITINFO_REG_MASK 0x0F
> +
>  #define	SVM_EXIT_READ_CR0 	0x000
>  #define	SVM_EXIT_READ_CR3 	0x003
>  #define	SVM_EXIT_READ_CR4 	0x004
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 298ff79..ee5f100 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -2594,12 +2594,81 @@ static int emulate_on_interception(struct vcpu_svm *svm)
>  	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
>  }
>  
> +static int cr_interception(struct vcpu_svm *svm)
> +{
> +	int reg, cr;
> +	unsigned long val;
> +	int err;
> +
> +	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
> +		return emulate_on_interception(svm);
> +
> +	/* bit 63 is the valid bit, as not all instructions (like lmsw)
> +	   provide the information */
> +	if (unlikely((svm->vmcb->control.exit_info_1 & (1ULL << 63)) == 0))
> +		return emulate_on_interception(svm);
> +
> +	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
> +	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
> +
> +	err = 0;
> +	if (cr >= 16) { /* mov to cr */
> +		cr -= 16;
> +		val = kvm_register_read(&svm->vcpu, reg);
> +		switch (cr) {
> +		case 0:
> +			err = kvm_set_cr0(&svm->vcpu, val);
> +			break;
> +		case 3:
> +			err = kvm_set_cr3(&svm->vcpu, val);
> +			break;
> +		case 4:
> +			err = kvm_set_cr4(&svm->vcpu, val);
> +			break;
> +		case 8:
> +			err = kvm_set_cr8(&svm->vcpu, val);
> +			break;
> +		default:
> +			WARN(1, "unhandled write to CR%d", cr);
> +			return EMULATE_FAIL;
> +		}

Wrong return value? Is WARN() really wanted?


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler
  2010-12-20 11:56   ` Marcelo Tosatti
@ 2010-12-20 13:20     ` Andre Przywara
  0 siblings, 0 replies; 17+ messages in thread
From: Andre Przywara @ 2010-12-20 13:20 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: avi@redhat.com, kvm@vger.kernel.org

Marcelo Tosatti wrote:
> On Fri, Dec 10, 2010 at 02:51:25PM +0100, Andre Przywara wrote:
>> Newer SVM implementations provide the GPR number in the VMCB, so
>> that the emulation path is no longer necessary to handle CR
>> register access intercepts. Implement the handling in svm.c and
>> use it when the info is provided.
>>
>> Signed-off-by: Andre Przywara <andre.przywara@amd.com>
>> ---
>>  arch/x86/include/asm/svm.h |    2 +
>>  arch/x86/kvm/svm.c         |   91 ++++++++++++++++++++++++++++++++++++++-----
>>  2 files changed, 82 insertions(+), 11 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
>> index 11dbca7..589fc25 100644
>> --- a/arch/x86/include/asm/svm.h
>> +++ b/arch/x86/include/asm/svm.h
>> @@ -256,6 +256,8 @@ struct __attribute__ ((__packed__)) vmcb {
>>  #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
>>  #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
>>  
>> +#define SVM_EXITINFO_REG_MASK 0x0F
>> +
>>  #define	SVM_EXIT_READ_CR0 	0x000
>>  #define	SVM_EXIT_READ_CR3 	0x003
>>  #define	SVM_EXIT_READ_CR4 	0x004
>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>> index 298ff79..ee5f100 100644
>> --- a/arch/x86/kvm/svm.c
>> +++ b/arch/x86/kvm/svm.c
>> @@ -2594,12 +2594,81 @@ static int emulate_on_interception(struct vcpu_svm *svm)
>>  	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
>>  }
>>  
>> +static int cr_interception(struct vcpu_svm *svm)
>> +{
>> +	int reg, cr;
>> +	unsigned long val;
>> +	int err;
>> +
>> +	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
>> +		return emulate_on_interception(svm);
>> +
>> +	/* bit 63 is the valid bit, as not all instructions (like lmsw)
>> +	   provide the information */
>> +	if (unlikely((svm->vmcb->control.exit_info_1 & (1ULL << 63)) == 0))
>> +		return emulate_on_interception(svm);
>> +
>> +	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
>> +	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
>> +
>> +	err = 0;
>> +	if (cr >= 16) { /* mov to cr */
>> +		cr -= 16;
>> +		val = kvm_register_read(&svm->vcpu, reg);
>> +		switch (cr) {
>> +		case 0:
>> +			err = kvm_set_cr0(&svm->vcpu, val);
>> +			break;
>> +		case 3:
>> +			err = kvm_set_cr3(&svm->vcpu, val);
>> +			break;
>> +		case 4:
>> +			err = kvm_set_cr4(&svm->vcpu, val);
>> +			break;
>> +		case 8:
>> +			err = kvm_set_cr8(&svm->vcpu, val);
>> +			break;
>> +		default:
>> +			WARN(1, "unhandled write to CR%d", cr);
>> +			return EMULATE_FAIL;
>> +		}
> 
> Wrong return value?
That's right, thanks for catching this. I copied this from somewhere else ;-(

> Is WARN() really wanted?
Well, this is a rather pathological code path. Illegal CR accesses 
generate #UD before interception, and additionally we don't set the 
intercept bits for these, so this should really never occur. But just 
for the sake of completeness I can inject an #UD here and return 1.
So I leave the WARN in to inform the user that something stupid is going 
on, but it could also be BUG().

Regards,
Andre.

-- 
Andre Przywara
AMD-OSRC (Dresden)
Tel: x29712


^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2010-12-20 13:23 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-12-10 13:51 [PATCH -v2 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
2010-12-10 13:51 ` [PATCH 1/5] kvm/svm: add new SVM feature bit names Andre Przywara
2010-12-10 13:51 ` [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler Andre Przywara
2010-12-13 12:13   ` Avi Kivity
2010-12-20 11:56   ` Marcelo Tosatti
2010-12-20 13:20     ` Andre Przywara
2010-12-10 13:51 ` [PATCH 3/5] kvm/svm: enhance mov DR " Andre Przywara
2010-12-13 12:14   ` Avi Kivity
2010-12-16 12:07     ` Andre Przywara
2010-12-16 12:48       ` Avi Kivity
2010-12-10 13:51 ` [PATCH 4/5] kvm/svm: implement enhanced INVLPG intercept Andre Przywara
2010-12-10 13:51 ` [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB Andre Przywara
2010-12-13 12:24   ` Avi Kivity
  -- strict thread matches above, loose matches on Subject: below --
2010-12-07 10:59 [PATCH 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
2010-12-07 10:59 ` [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler Andre Przywara
2010-12-07 13:24   ` Avi Kivity
2010-12-07 14:30     ` Andre Przywara
2010-12-07 14:41       ` Avi Kivity

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox