* [PATCH 1/8] kvm: fix CR8 handling
2010-12-21 10:11 [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features Andre Przywara
@ 2010-12-21 10:12 ` Andre Przywara
2010-12-21 10:12 ` [PATCH 2/8] kvm: move complete_insn_gp() into x86.c Andre Przywara
` (8 subsequent siblings)
9 siblings, 0 replies; 12+ messages in thread
From: Andre Przywara @ 2010-12-21 10:12 UTC
To: avi, mtosatti; +Cc: kvm, Andre Przywara
The handling of CR8 writes in KVM is currently somewhat cumbersome.
This patch makes kvm_set_cr8() return an error code like the other
CR register handlers and fixes a possible issue in VMX, where the
RIP would be incremented despite an injected #GP.
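As a minimal sketch of the resulting control flow (for illustration
only; complete_insn_gp() is still the VMX-local helper at this point):

	/* Only advance RIP when the CR8 write actually succeeded. */
	int err = kvm_set_cr8(vcpu, cr8);  /* non-zero if reserved bits set */
	if (err)
		kvm_inject_gp(vcpu, 0);    /* fault, leave RIP untouched */
	else
		skip_emulated_instruction(vcpu);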
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/svm.c | 7 ++++---
arch/x86/kvm/vmx.c | 4 ++--
arch/x86/kvm/x86.c | 18 ++++++++----------
4 files changed, 15 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4461429..cb5cad2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -661,7 +661,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 24b4373..06a0892 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2676,16 +2676,17 @@ static int cr0_write_interception(struct vcpu_svm *svm)
static int cr8_write_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
+ int r;
u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
/* instruction emulation calls kvm_set_cr8() */
- emulate_instruction(&svm->vcpu, 0, 0, 0);
+ r = emulate_instruction(&svm->vcpu, 0, 0, 0);
if (irqchip_in_kernel(svm->vcpu.kvm)) {
clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
- return 1;
+ return r == EMULATE_DONE;
}
if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
- return 1;
+ return r == EMULATE_DONE;
kvm_run->exit_reason = KVM_EXIT_SET_TPR;
return 0;
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c195260..8e87bae 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3185,8 +3185,8 @@ static int handle_cr(struct kvm_vcpu *vcpu)
case 8: {
u8 cr8_prev = kvm_get_cr8(vcpu);
u8 cr8 = kvm_register_read(vcpu, reg);
- kvm_set_cr8(vcpu, cr8);
- skip_emulated_instruction(vcpu);
+ err = kvm_set_cr8(vcpu, cr8);
+ complete_insn_gp(vcpu, err);
if (irqchip_in_kernel(vcpu->kvm))
return 1;
if (cr8_prev <= cr8)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f569da8..2dbf68c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -662,7 +662,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
-int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
if (cr8 & CR8_RESERVED_BITS)
return 1;
@@ -672,12 +672,6 @@ int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
vcpu->arch.cr8 = cr8;
return 0;
}
-
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
-{
- if (__kvm_set_cr8(vcpu, cr8))
- kvm_inject_gp(vcpu, 0);
-}
EXPORT_SYMBOL_GPL(kvm_set_cr8);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
@@ -4104,7 +4098,7 @@ static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
break;
case 8:
- res = __kvm_set_cr8(vcpu, val & 0xfUL);
+ res = kvm_set_cr8(vcpu, val);
break;
default:
vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
@@ -5381,8 +5375,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
/* re-sync apic's tpr */
- if (!irqchip_in_kernel(vcpu->kvm))
- kvm_set_cr8(vcpu, kvm_run->cr8);
+ if (!irqchip_in_kernel(vcpu->kvm)) {
+ if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
+ r = -EINVAL;
+ goto out;
+ }
+ }
if (vcpu->arch.pio.count || vcpu->mmio_needed) {
if (vcpu->mmio_needed) {
--
1.6.4
* [PATCH 2/8] kvm: move complete_insn_gp() into x86.c
2010-12-21 10:11 [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features Andre Przywara
2010-12-21 10:12 ` [PATCH 1/8] kvm: fix CR8 handling Andre Przywara
@ 2010-12-21 10:12 ` Andre Przywara
2010-12-21 10:12 ` [PATCH 3/8] kvm: cleanup emulate_instruction Andre Przywara
` (7 subsequent siblings)
9 siblings, 0 replies; 12+ messages in thread
From: Andre Przywara @ 2010-12-21 10:12 UTC
To: avi, mtosatti; +Cc: kvm, Andre Przywara
Move the complete_insn_gp() helper function out of the VMX code
into the generic x86 code to make it usable by SVM.
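A hypothetical SVM-side user could then look like this (sketch only;
the real handlers arrive in later patches of this series):

	/* Sketch: 'reg' stands for an already-decoded GPR number. */
	static int example_cr4_write(struct vcpu_svm *svm, int reg)
	{
		unsigned long val = kvm_register_read(&svm->vcpu, reg);
		int err = kvm_set_cr4(&svm->vcpu, val);

		kvm_complete_insn_gp(&svm->vcpu, err); /* #GP or skip insn */
		return 1;
	}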
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
arch/x86/include/asm/kvm_host.h | 2 ++
arch/x86/kvm/vmx.c | 16 ++++------------
arch/x86/kvm/x86.c | 9 +++++++++
3 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cb5cad2..cd4a990 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -828,4 +828,6 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8e87bae..fd8ffde 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3147,14 +3147,6 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
hypercall[2] = 0xc1;
}
-static void complete_insn_gp(struct kvm_vcpu *vcpu, int err)
-{
- if (err)
- kvm_inject_gp(vcpu, 0);
- else
- skip_emulated_instruction(vcpu);
-}
-
static int handle_cr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification, val;
@@ -3172,21 +3164,21 @@ static int handle_cr(struct kvm_vcpu *vcpu)
switch (cr) {
case 0:
err = kvm_set_cr0(vcpu, val);
- complete_insn_gp(vcpu, err);
+ kvm_complete_insn_gp(vcpu, err);
return 1;
case 3:
err = kvm_set_cr3(vcpu, val);
- complete_insn_gp(vcpu, err);
+ kvm_complete_insn_gp(vcpu, err);
return 1;
case 4:
err = kvm_set_cr4(vcpu, val);
- complete_insn_gp(vcpu, err);
+ kvm_complete_insn_gp(vcpu, err);
return 1;
case 8: {
u8 cr8_prev = kvm_get_cr8(vcpu);
u8 cr8 = kvm_register_read(vcpu, reg);
err = kvm_set_cr8(vcpu, cr8);
- complete_insn_gp(vcpu, err);
+ kvm_complete_insn_gp(vcpu, err);
if (irqchip_in_kernel(vcpu->kvm))
return 1;
if (cr8_prev <= cr8)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2dbf68c..1d54cb7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -334,6 +334,15 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
+{
+ if (err)
+ kvm_inject_gp(vcpu, 0);
+ else
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
+
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
++vcpu->stat.pf_guest;
--
1.6.4
* [PATCH 3/8] kvm: cleanup emulate_instruction
2010-12-21 10:11 [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features Andre Przywara
2010-12-21 10:12 ` [PATCH 1/8] kvm: fix CR8 handling Andre Przywara
2010-12-21 10:12 ` [PATCH 2/8] kvm: move complete_insn_gp() into x86.c Andre Przywara
@ 2010-12-21 10:12 ` Andre Przywara
2010-12-21 10:12 ` [PATCH 4/8] kvm/svm: add new SVM feature bit names Andre Przywara
` (6 subsequent siblings)
9 siblings, 0 replies; 12+ messages in thread
From: Andre Przywara @ 2010-12-21 10:12 UTC
To: avi, mtosatti; +Cc: kvm, Andre Przywara
emulate_instruction() had many callers, but only one used all of its
parameters. One parameter (error_code) was unused; another (cr2) is now
hidden by a wrapper function (required for a future addition anyway),
so most callers now use a shorter parameter list.
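For illustration, the common call shape shrinks from

	er = emulate_instruction(vcpu, 0, 0, 0);

to

	er = emulate_instruction(vcpu, 0);

while the page-fault path, the only user of cr2, calls
x86_emulate_instruction(vcpu, cr2, 0) directly.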
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
arch/x86/include/asm/kvm_host.h | 11 +++++++++--
arch/x86/kvm/mmu.c | 2 +-
arch/x86/kvm/svm.c | 14 +++++++-------
arch/x86/kvm/vmx.c | 12 ++++++------
arch/x86/kvm/x86.c | 11 +++++------
5 files changed, 28 insertions(+), 22 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cd4a990..de00b60 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -634,8 +634,15 @@ enum emulation_result {
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
-int emulate_instruction(struct kvm_vcpu *vcpu,
- unsigned long cr2, u16 error_code, int emulation_type);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu,
+ unsigned long cr2, int emulation_type);
+
+static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+ int emulation_type)
+{
+ return x86_emulate_instruction(vcpu, 0, emulation_type);
+}
+
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c3853d5..75334de 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3347,7 +3347,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
if (r)
goto out;
- er = emulate_instruction(vcpu, cr2, error_code, 0);
+ er = x86_emulate_instruction(vcpu, cr2, 0);
switch (er) {
case EMULATE_DONE:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 06a0892..d49d73c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -475,7 +475,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
svm->next_rip = svm->vmcb->control.next_rip;
if (!svm->next_rip) {
- if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
+ if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
EMULATE_DONE)
printk(KERN_DEBUG "%s: NOP\n", __func__);
return;
@@ -1586,7 +1586,7 @@ static int ud_interception(struct vcpu_svm *svm)
{
int er;
- er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
+ er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
if (er != EMULATE_DONE)
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
@@ -1703,7 +1703,7 @@ static int io_interception(struct vcpu_svm *svm)
string = (io_info & SVM_IOIO_STR_MASK) != 0;
in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
if (string || in)
- return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+ return emulate_instruction(vcpu, 0) == EMULATE_DONE;
port = io_info >> 16;
size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -2648,12 +2648,12 @@ static int iret_interception(struct vcpu_svm *svm)
static int invlpg_interception(struct vcpu_svm *svm)
{
- return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+ return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}
static int emulate_on_interception(struct vcpu_svm *svm)
{
- return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+ return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}
static int cr0_write_interception(struct vcpu_svm *svm)
@@ -2661,7 +2661,7 @@ static int cr0_write_interception(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;
int r;
- r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+ r = emulate_instruction(&svm->vcpu, 0);
if (svm->nested.vmexit_rip) {
kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
@@ -2680,7 +2680,7 @@ static int cr8_write_interception(struct vcpu_svm *svm)
u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
/* instruction emulation calls kvm_set_cr8() */
- r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+ r = emulate_instruction(&svm->vcpu, 0);
if (irqchip_in_kernel(svm->vcpu.kvm)) {
clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
return r == EMULATE_DONE;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index fd8ffde..f3c60fb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2939,7 +2939,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
* Cause the #SS fault with 0 error code in VM86 mode.
*/
if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
- if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE)
+ if (emulate_instruction(vcpu, 0) == EMULATE_DONE)
return 1;
/*
* Forward all other exceptions that are valid in real mode.
@@ -3036,7 +3036,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
}
if (is_invalid_opcode(intr_info)) {
- er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD);
+ er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
if (er != EMULATE_DONE)
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
@@ -3127,7 +3127,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
++vcpu->stat.io_exits;
if (string || in)
- return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+ return emulate_instruction(vcpu, 0) == EMULATE_DONE;
port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;
@@ -3372,7 +3372,7 @@ static int handle_vmx_insn(struct kvm_vcpu *vcpu)
static int handle_invd(struct kvm_vcpu *vcpu)
{
- return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+ return emulate_instruction(vcpu, 0) == EMULATE_DONE;
}
static int handle_invlpg(struct kvm_vcpu *vcpu)
@@ -3403,7 +3403,7 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
static int handle_apic_access(struct kvm_vcpu *vcpu)
{
- return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+ return emulate_instruction(vcpu, 0) == EMULATE_DONE;
}
static int handle_task_switch(struct kvm_vcpu *vcpu)
@@ -3618,7 +3618,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
&& (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF))
return handle_interrupt_window(&vmx->vcpu);
- err = emulate_instruction(vcpu, 0, 0, 0);
+ err = emulate_instruction(vcpu, 0);
if (err == EMULATE_DO_MMIO) {
ret = 0;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1d54cb7..a6fcb76 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4363,10 +4363,9 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
return false;
}
-int emulate_instruction(struct kvm_vcpu *vcpu,
- unsigned long cr2,
- u16 error_code,
- int emulation_type)
+int x86_emulate_instruction(struct kvm_vcpu *vcpu,
+ unsigned long cr2,
+ int emulation_type)
{
int r;
struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
@@ -4474,7 +4473,7 @@ done:
return r;
}
-EXPORT_SYMBOL_GPL(emulate_instruction);
+EXPORT_SYMBOL_GPL(x86_emulate_instruction);
int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
{
@@ -5398,7 +5397,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->mmio_needed = 0;
}
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
+ r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (r != EMULATE_DONE) {
r = 0;
--
1.6.4
* [PATCH 4/8] kvm/svm: add new SVM feature bit names
2010-12-21 10:11 [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features Andre Przywara
` (2 preceding siblings ...)
2010-12-21 10:12 ` [PATCH 3/8] kvm: cleanup emulate_instruction Andre Przywara
@ 2010-12-21 10:12 ` Andre Przywara
2010-12-21 10:12 ` [PATCH 5/8] kvm/svm: enhance MOV CR intercept handler Andre Przywara
` (5 subsequent siblings)
9 siblings, 0 replies; 12+ messages in thread
From: Andre Przywara @ 2010-12-21 10:12 UTC
To: avi, mtosatti; +Cc: kvm, Andre Przywara
The recent APM Vol. 2 and the AMD CPUID specification describe new
CPUID feature bits for SVM. Name them here for later use.
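These bits correspond to CPUID Fn8000_000Ah EDX; a minimal user-space
detection sketch (assumes a CPU that implements the SVM feature
identification leaf):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID Fn8000_000A: SVM revision and feature identification */
		if (!__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx))
			return 1;
		printf("DecodeAssist: %s\n", (edx & (1 << 7)) ? "yes" : "no");
		printf("NRIP save:    %s\n", (edx & (1 << 3)) ? "yes" : "no");
		return 0;
	}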
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
arch/x86/kvm/svm.c | 4 ++++
1 files changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d49d73c..e2ea75f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -51,6 +51,10 @@ MODULE_LICENSE("GPL");
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
#define SVM_FEATURE_NRIP (1 << 3)
+#define SVM_FEATURE_TSC_RATE (1 << 4)
+#define SVM_FEATURE_VMCB_CLEAN (1 << 5)
+#define SVM_FEATURE_FLUSH_ASID (1 << 6)
+#define SVM_FEATURE_DECODE_ASSIST (1 << 7)
#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
#define NESTED_EXIT_HOST 0 /* Exit handled on host level */
--
1.6.4
* [PATCH 5/8] kvm/svm: enhance MOV CR intercept handler
2010-12-21 10:11 [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features Andre Przywara
` (3 preceding siblings ...)
2010-12-21 10:12 ` [PATCH 4/8] kvm/svm: add new SVM feature bit names Andre Przywara
@ 2010-12-21 10:12 ` Andre Przywara
2010-12-21 13:43 ` Avi Kivity
2010-12-21 10:12 ` [PATCH 6/8] kvm/svm: enhance mov DR " Andre Przywara
` (4 subsequent siblings)
9 siblings, 1 reply; 12+ messages in thread
From: Andre Przywara @ 2010-12-21 10:12 UTC
To: avi, mtosatti; +Cc: kvm, Andre Przywara
Newer SVM implementations provide the GPR number in the VMCB, so
the emulation path is no longer necessary for handling CR register
access intercepts. Implement the handling directly in svm.c and use
it whenever the hardware provides this information.
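As a worked example of the new VMCB contents (sketch; layout per the
APM): a guest mov %rax, %cr4 exits with exit_code SVM_EXIT_WRITE_CR4
(0x014), bit 63 of exit_info_1 set to flag valid decode data, and the
GPR number (0 for RAX) in the low bits:

	if (svm->vmcb->control.exit_info_1 & (1ULL << 63)) { /* decode valid */
		int reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
		int cr  = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
		/* cr < 16: MOV from CRcr; cr >= 16: MOV to CR(cr - 16) */
	}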
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
arch/x86/include/asm/svm.h | 2 +
arch/x86/kvm/svm.c | 90 ++++++++++++++++++++++++++++++++++++++-----
2 files changed, 81 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index f7087bf..f0ffb81 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -260,6 +260,8 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
+#define SVM_EXITINFO_REG_MASK 0x0F
+
#define SVM_EXIT_READ_CR0 0x000
#define SVM_EXIT_READ_CR3 0x003
#define SVM_EXIT_READ_CR4 0x004
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e2ea75f..b47e2e6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2660,12 +2660,80 @@ static int emulate_on_interception(struct vcpu_svm *svm)
return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}
+#define CR_VALID (1ULL << 63)
+
+static int cr_interception(struct vcpu_svm *svm)
+{
+ int reg, cr;
+ unsigned long val;
+ int err;
+
+ if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+ return emulate_on_interception(svm);
+
+ if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
+ return emulate_on_interception(svm);
+
+ reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+ cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
+
+ err = 0;
+ if (cr >= 16) { /* mov to cr */
+ cr -= 16;
+ val = kvm_register_read(&svm->vcpu, reg);
+ switch (cr) {
+ case 0:
+ err = kvm_set_cr0(&svm->vcpu, val);
+ break;
+ case 3:
+ err = kvm_set_cr3(&svm->vcpu, val);
+ break;
+ case 4:
+ err = kvm_set_cr4(&svm->vcpu, val);
+ break;
+ case 8:
+ err = kvm_set_cr8(&svm->vcpu, val);
+ break;
+ default:
+ WARN(1, "unhandled write to CR%d", cr);
+ kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ return 1;
+ }
+ } else { /* mov from cr */
+ switch (cr) {
+ case 0:
+ val = kvm_read_cr0(&svm->vcpu);
+ break;
+ case 2:
+ val = svm->vcpu.arch.cr2;
+ break;
+ case 3:
+ val = svm->vcpu.arch.cr3;
+ break;
+ case 4:
+ val = kvm_read_cr4(&svm->vcpu);
+ break;
+ case 8:
+ val = kvm_get_cr8(&svm->vcpu);
+ break;
+ default:
+ WARN(1, "unhandled read from CR%d", cr);
+ kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ return 1;
+ }
+ kvm_register_write(&svm->vcpu, reg, val);
+ }
+ kvm_complete_insn_gp(&svm->vcpu, err);
+
+ return 1;
+}
+
static int cr0_write_interception(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
int r;
- r = emulate_instruction(&svm->vcpu, 0);
+ r = cr_interception(svm);
if (svm->nested.vmexit_rip) {
kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
@@ -2674,7 +2742,7 @@ static int cr0_write_interception(struct vcpu_svm *svm)
svm->nested.vmexit_rip = 0;
}
- return r == EMULATE_DONE;
+ return r;
}
static int cr8_write_interception(struct vcpu_svm *svm)
@@ -2684,13 +2752,13 @@ static int cr8_write_interception(struct vcpu_svm *svm)
u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
/* instruction emulation calls kvm_set_cr8() */
- r = emulate_instruction(&svm->vcpu, 0);
+ r = cr_interception(svm);
if (irqchip_in_kernel(svm->vcpu.kvm)) {
clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
- return r == EMULATE_DONE;
+ return r;
}
if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
- return r == EMULATE_DONE;
+ return r;
kvm_run->exit_reason = KVM_EXIT_SET_TPR;
return 0;
}
@@ -2933,14 +3001,14 @@ static int pause_interception(struct vcpu_svm *svm)
}
static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
- [SVM_EXIT_READ_CR0] = emulate_on_interception,
- [SVM_EXIT_READ_CR3] = emulate_on_interception,
- [SVM_EXIT_READ_CR4] = emulate_on_interception,
- [SVM_EXIT_READ_CR8] = emulate_on_interception,
+ [SVM_EXIT_READ_CR0] = cr_interception,
+ [SVM_EXIT_READ_CR3] = cr_interception,
+ [SVM_EXIT_READ_CR4] = cr_interception,
+ [SVM_EXIT_READ_CR8] = cr_interception,
[SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
[SVM_EXIT_WRITE_CR0] = cr0_write_interception,
- [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR3] = cr_interception,
+ [SVM_EXIT_WRITE_CR4] = cr_interception,
[SVM_EXIT_WRITE_CR8] = cr8_write_interception,
[SVM_EXIT_READ_DR0] = emulate_on_interception,
[SVM_EXIT_READ_DR1] = emulate_on_interception,
--
1.6.4
* Re: [PATCH 5/8] kvm/svm: enhance MOV CR intercept handler
2010-12-21 10:12 ` [PATCH 5/8] kvm/svm: enhance MOV CR intercept handler Andre Przywara
@ 2010-12-21 13:43 ` Avi Kivity
0 siblings, 0 replies; 12+ messages in thread
From: Avi Kivity @ 2010-12-21 13:43 UTC
To: Andre Przywara; +Cc: mtosatti, kvm
On 12/21/2010 12:12 PM, Andre Przywara wrote:
> Newer SVM implementations provide the GPR number in the VMCB, so
> the emulation path is no longer necessary for handling CR register
> access intercepts. Implement the handling directly in svm.c and use
> it whenever the hardware provides this information.
>
>
> +static int cr_interception(struct vcpu_svm *svm)
> +{
>
...
> + case 3:
> + val = svm->vcpu.arch.cr3;
> + break;
This is a logical conflict with my cr3 caching patchset, which turns
this into kvm_read_cr3(). Marcelo, whichever you apply first, please be
sure to fix up the second.
--
error compiling committee.c: too many arguments to function
* [PATCH 6/8] kvm/svm: enhance mov DR intercept handler
2010-12-21 10:11 [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features Andre Przywara
` (4 preceding siblings ...)
2010-12-21 10:12 ` [PATCH 5/8] kvm/svm: enhance MOV CR intercept handler Andre Przywara
@ 2010-12-21 10:12 ` Andre Przywara
2010-12-21 10:12 ` [PATCH 7/8] kvm/svm: implement enhanced INVLPG intercept Andre Przywara
` (3 subsequent siblings)
9 siblings, 0 replies; 12+ messages in thread
From: Andre Przywara @ 2010-12-21 10:12 UTC
To: avi, mtosatti; +Cc: kvm, Andre Przywara
Newer SVM implementations provide the GPR number in the VMCB, so
the emulation path is no longer necessary for handling debug register
access intercepts. Implement the handling directly in svm.c and use
it whenever the hardware provides this information.
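The read/write distinction again falls out of the exit-code layout
(sketch; values from svm.h, where READ_DR0..DR7 occupy 0x020-0x027 and
WRITE_DR0..DR7 occupy 0x030-0x037):

	int dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
	/* e.g. SVM_EXIT_WRITE_DR7 (0x037) - SVM_EXIT_READ_DR0 (0x020) = 23,
	 * so dr >= 16 means a MOV to DR(dr - 16), here DR7. */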
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
arch/x86/kvm/svm.c | 57 +++++++++++++++++++++++++++++++++++++--------------
1 files changed, 41 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b47e2e6..eb662da 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2745,6 +2745,31 @@ static int cr0_write_interception(struct vcpu_svm *svm)
return r;
}
+static int dr_interception(struct vcpu_svm *svm)
+{
+ int reg, dr;
+ unsigned long val;
+ int err;
+
+ if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
+ return emulate_on_interception(svm);
+
+ reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+ dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
+
+ if (dr >= 16) { /* mov to DRn */
+ val = kvm_register_read(&svm->vcpu, reg);
+ err = kvm_set_dr(&svm->vcpu, dr - 16, val);
+ } else {
+ err = kvm_get_dr(&svm->vcpu, dr, &val);
+ if (!err)
+ kvm_register_write(&svm->vcpu, reg, val);
+ }
+ kvm_complete_insn_gp(&svm->vcpu, err);
+
+ return 1;
+}
+
static int cr8_write_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
@@ -3010,22 +3035,22 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_WRITE_CR3] = cr_interception,
[SVM_EXIT_WRITE_CR4] = cr_interception,
[SVM_EXIT_WRITE_CR8] = cr8_write_interception,
- [SVM_EXIT_READ_DR0] = emulate_on_interception,
- [SVM_EXIT_READ_DR1] = emulate_on_interception,
- [SVM_EXIT_READ_DR2] = emulate_on_interception,
- [SVM_EXIT_READ_DR3] = emulate_on_interception,
- [SVM_EXIT_READ_DR4] = emulate_on_interception,
- [SVM_EXIT_READ_DR5] = emulate_on_interception,
- [SVM_EXIT_READ_DR6] = emulate_on_interception,
- [SVM_EXIT_READ_DR7] = emulate_on_interception,
- [SVM_EXIT_WRITE_DR0] = emulate_on_interception,
- [SVM_EXIT_WRITE_DR1] = emulate_on_interception,
- [SVM_EXIT_WRITE_DR2] = emulate_on_interception,
- [SVM_EXIT_WRITE_DR3] = emulate_on_interception,
- [SVM_EXIT_WRITE_DR4] = emulate_on_interception,
- [SVM_EXIT_WRITE_DR5] = emulate_on_interception,
- [SVM_EXIT_WRITE_DR6] = emulate_on_interception,
- [SVM_EXIT_WRITE_DR7] = emulate_on_interception,
+ [SVM_EXIT_READ_DR0] = dr_interception,
+ [SVM_EXIT_READ_DR1] = dr_interception,
+ [SVM_EXIT_READ_DR2] = dr_interception,
+ [SVM_EXIT_READ_DR3] = dr_interception,
+ [SVM_EXIT_READ_DR4] = dr_interception,
+ [SVM_EXIT_READ_DR5] = dr_interception,
+ [SVM_EXIT_READ_DR6] = dr_interception,
+ [SVM_EXIT_READ_DR7] = dr_interception,
+ [SVM_EXIT_WRITE_DR0] = dr_interception,
+ [SVM_EXIT_WRITE_DR1] = dr_interception,
+ [SVM_EXIT_WRITE_DR2] = dr_interception,
+ [SVM_EXIT_WRITE_DR3] = dr_interception,
+ [SVM_EXIT_WRITE_DR4] = dr_interception,
+ [SVM_EXIT_WRITE_DR5] = dr_interception,
+ [SVM_EXIT_WRITE_DR6] = dr_interception,
+ [SVM_EXIT_WRITE_DR7] = dr_interception,
[SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
[SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
[SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
--
1.6.4
* [PATCH 7/8] kvm/svm: implement enhanced INVLPG intercept
2010-12-21 10:11 [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features Andre Przywara
` (5 preceding siblings ...)
2010-12-21 10:12 ` [PATCH 6/8] kvm/svm: enhance mov DR " Andre Przywara
@ 2010-12-21 10:12 ` Andre Przywara
2010-12-21 10:12 ` [PATCH 8/8] kvm/svm: copy instruction bytes from VMCB Andre Przywara
` (2 subsequent siblings)
9 siblings, 0 replies; 12+ messages in thread
From: Andre Przywara @ 2010-12-21 10:12 UTC
To: avi, mtosatti; +Cc: kvm, Andre Przywara
When the DecodeAssist feature is available, the linear address is
provided in the VMCB on INVLPG intercepts. Use it directly to avoid
any decoding and emulation.
This only matters for shadow paging, though, as INVLPG is not
intercepted when nested paging is enabled.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
arch/x86/kvm/svm.c | 7 ++++++-
1 files changed, 6 insertions(+), 1 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index eb662da..7871702 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2652,7 +2652,12 @@ static int iret_interception(struct vcpu_svm *svm)
static int invlpg_interception(struct vcpu_svm *svm)
{
- return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+ if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+ return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+
+ kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
+ skip_emulated_instruction(&svm->vcpu);
+ return 1;
}
static int emulate_on_interception(struct vcpu_svm *svm)
--
1.6.4
* [PATCH 8/8] kvm/svm: copy instruction bytes from VMCB
2010-12-21 10:11 [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features Andre Przywara
` (6 preceding siblings ...)
2010-12-21 10:12 ` [PATCH 7/8] kvm/svm: implement enhanced INVLPG intercept Andre Przywara
@ 2010-12-21 10:12 ` Andre Przywara
2010-12-21 13:41 ` [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features Avi Kivity
2010-12-23 12:47 ` Marcelo Tosatti
9 siblings, 0 replies; 12+ messages in thread
From: Andre Przywara @ 2010-12-21 10:12 UTC
To: avi, mtosatti; +Cc: kvm, Andre Przywara
In case of a nested page fault or an intercepted #PF, newer SVM
implementations provide a copy of the faulting instruction's bytes
in the VMCB.
Use these bytes to feed the instruction emulator and avoid the costly
guest instruction fetch in this case.
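For illustration (hypothetical values): if a guest mov %eax, (%rbx)
faults on an MMIO page, the hardware would report insn_len = 2 and
insn_bytes = { 0x89, 0x03, ... }, so seeding the decoder's fetch cache
with those bytes lets x86_decode_insn() run without touching guest
memory:

	c->fetch.start = c->eip;
	c->fetch.end   = c->fetch.start + insn_len;	/* eip + 2 */
	memcpy(c->fetch.data, insn, insn_len);		/* { 0x89, 0x03 } */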
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
arch/x86/include/asm/kvm_emulate.h | 2 +-
arch/x86/include/asm/kvm_host.h | 9 +++++----
arch/x86/include/asm/svm.h | 4 +++-
arch/x86/kvm/emulate.c | 7 +++++--
arch/x86/kvm/mmu.c | 5 +++--
arch/x86/kvm/svm.c | 4 +++-
arch/x86/kvm/vmx.c | 4 ++--
arch/x86/kvm/x86.c | 6 ++++--
8 files changed, 26 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index bf70ece..8e37deb 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -265,7 +265,7 @@ struct x86_emulate_ctxt {
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
#define EMULATION_RESTART 1
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index de00b60..6268f6c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -634,13 +634,13 @@ enum emulation_result {
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
-int x86_emulate_instruction(struct kvm_vcpu *vcpu,
- unsigned long cr2, int emulation_type);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+ int emulation_type, void *insn, int insn_len);
static inline int emulate_instruction(struct kvm_vcpu *vcpu,
int emulation_type)
{
- return x86_emulate_instruction(vcpu, 0, emulation_type);
+ return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
@@ -721,7 +721,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+ void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_enable_tdp(void);
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index f0ffb81..f2b83bc 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -83,7 +83,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u32 clean;
u32 reserved_5;
u64 next_rip;
- u8 reserved_6[816];
+ u8 insn_len;
+ u8 insn_bytes[15];
+ u8 reserved_6[800];
};
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6366735..02a0041 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2610,7 +2610,7 @@ done:
}
int
-x86_decode_insn(struct x86_emulate_ctxt *ctxt)
+x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
struct x86_emulate_ops *ops = ctxt->ops;
struct decode_cache *c = &ctxt->decode;
@@ -2621,7 +2621,10 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt)
struct operand memop = { .type = OP_NONE };
c->eip = ctxt->eip;
- c->fetch.start = c->fetch.end = c->eip;
+ c->fetch.start = c->eip;
+ c->fetch.end = c->fetch.start + insn_len;
+ if (insn_len > 0)
+ memcpy(c->fetch.data, insn, insn_len);
ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
switch (mode) {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 75334de..397a98f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3329,7 +3329,8 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
}
}
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+ void *insn, int insn_len)
{
int r;
enum emulation_result er;
@@ -3347,7 +3348,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
if (r)
goto out;
- er = x86_emulate_instruction(vcpu, cr2, 0);
+ er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
switch (er) {
case EMULATE_DONE:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7871702..00a6117 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1527,7 +1527,9 @@ static int pf_interception(struct vcpu_svm *svm)
trace_kvm_page_fault(fault_address, error_code);
if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
- r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
+ r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+ svm->vmcb->control.insn_bytes,
+ svm->vmcb->control.insn_len);
break;
case KVM_PV_REASON_PAGE_NOT_PRESENT:
svm->apf_reason = 0;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f3c60fb..736f839 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3055,7 +3055,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
if (kvm_event_needs_reinjection(vcpu))
kvm_mmu_unprotect_page_virt(vcpu, cr2);
- return kvm_mmu_page_fault(vcpu, cr2, error_code);
+ return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
}
if (vmx->rmode.vm86_active &&
@@ -3502,7 +3502,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
trace_kvm_page_fault(gpa, exit_qualification);
- return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3);
+ return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0);
}
static u64 ept_rsvd_mask(u64 spte, int level)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a6fcb76..7ad9cda 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4365,7 +4365,9 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
- int emulation_type)
+ int emulation_type,
+ void *insn,
+ int insn_len)
{
int r;
struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
@@ -4386,7 +4388,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
vcpu->arch.emulate_ctxt.have_exception = false;
vcpu->arch.emulate_ctxt.perm_ok = false;
- r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
+ r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
if (r == X86EMUL_PROPAGATE_FAULT)
goto done;
--
1.6.4
* Re: [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features
2010-12-21 10:11 [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features Andre Przywara
` (7 preceding siblings ...)
2010-12-21 10:12 ` [PATCH 8/8] kvm/svm: copy instruction bytes from VMCB Andre Przywara
@ 2010-12-21 13:41 ` Avi Kivity
2010-12-23 12:47 ` Marcelo Tosatti
9 siblings, 0 replies; 12+ messages in thread
From: Avi Kivity @ 2010-12-21 13:41 UTC
To: Andre Przywara; +Cc: mtosatti, kvm
On 12/21/2010 12:11 PM, Andre Przywara wrote:
> Hi,
>
> this is version 3 of the DecodeAssist patches.
> I added 3 clean up patches which are not SVM specific.
> Changes between v2 and v3:
> - now includes the (unchanged) CR8 handling fix
> - move complete_insn_gp() helper function into x86.c
> - remove unnecessary comment
> - fix handling of illegal CR accesses (inject #UD, should actually not occur)
> - completely rework the instruction bytes copy patch
> Now this propagates the addr/len pair from the interception handling
> into the emulator. For this I cleaned up this code path a bit
> (patch 3), so it does not blow up all current users of emulate_instruction.
>
> Changes between v1 and v2:
> - goes on top of the CR8 handling fix I sent out earlier this week
> (required for proper handling of CR8 exceptions)
> - handles exception cases properly (for mov cr and mov dr)
> - uses X86_FEATURE_ names instead of SVM_FEATURE names (for boot_cpu_has)
> (thanks to Joerg for spotting this)
> - use static_cpu_has where appropriate
> - some minor code cleanups (for instance cr register calculation)
> - move prefetch callback into x86_decode_insn and out of every fetch
> I refrained from ditching the callback altogether, as I don't like extending
> every emulate_instruction call with "NULL, 0". But if this is
> desperately needed, I can still change it.
> - rename vendor specific prefetch function names
>
> Upcoming AMD CPUs will have an SVM enhancement called DecodeAssist
> which will provide more information when intercepting certain events.
> This information allows skipping the instruction fetch and decode
> and handling the intercept immediately.
> This patch set implements all the features which are documented
> in the recent AMD manual (APM vol. 2). For details see the patches.
>
Looks good.
--
error compiling committee.c: too many arguments to function
* Re: [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features
2010-12-21 10:11 [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features Andre Przywara
` (8 preceding siblings ...)
2010-12-21 13:41 ` [PATCH -v3 0/8] kvm/svm: implement new DecodeAssist features Avi Kivity
@ 2010-12-23 12:47 ` Marcelo Tosatti
9 siblings, 0 replies; 12+ messages in thread
From: Marcelo Tosatti @ 2010-12-23 12:47 UTC
To: Andre Przywara; +Cc: avi, kvm
On Tue, Dec 21, 2010 at 11:11:59AM +0100, Andre Przywara wrote:
> Hi,
>
> this is version 3 of the DecodeAssist patches.
> I added 3 clean up patches which are not SVM specific.
> Changes between v2 and v3:
> - now includes the (unchanged) CR8 handling fix
> - move complete_insn_gp() helper function into x86.c
> - remove unnecessary comment
> - fix handling of illegal CR accesses (inject #UD, should actually not occur)
> - completely rework the instruction bytes copy patch
> Now this propagates the addr/len pair from the interception handling
> into the emulator. For this I cleaned up this code path a bit
> (patch 3), so it does not blow up all current users of emulate_instruction.
>
> Changes between v1 and v2:
> - goes on top of the CR8 handling fix I sent out earlier this week
> (required for proper handling of CR8 exceptions)
> - handles exception cases properly (for mov cr and mov dr)
> - uses X86_FEATURE_ names instead of SVM_FEATURE names (for boot_cpu_has)
> (thanks to Joerg for spotting this)
> - use static_cpu_has where appropriate
> - some minor code cleanups (for instance cr register calculation)
> - move prefetch callback into x86_decode_insn and out of every fetch
> I refrained from ditching the callback altogether, as I don't like extending
> every emulate_instruction call with "NULL, 0". But if this is
> desperately needed, I can still change it.
> - rename vendor specific prefetch function names
>
> Upcoming AMD CPUs will have an SVM enhancement called DecodeAssist
> which will provide more information when intercepting certain events.
> This information allows skipping the instruction fetch and decode
> and handling the intercept immediately.
> This patch set implements all the features which are documented
> in the recent AMD manual (APM vol. 2). For details see the patches.
>
> Please review and apply.
>
> Regards,
> Andre.
Applied, thanks (fixed the DR patch to not inject exceptions since
it's already done by get_dr/set_dr).