* [PATCH 1/5] vmx/hvm: move mov-cr handling functions to generic HVM code
2011-04-15 12:16 [PATCH 0/5] svm: implement new DecodeAssist feature Andre Przywara
@ 2011-04-15 12:21 ` Andre Przywara
2011-04-18 8:48 ` Keir Fraser
2011-04-15 12:21 ` [PATCH 2/5] svm: add bit definitions for SVM DecodeAssist Andre Przywara
` (3 subsequent siblings)
4 siblings, 1 reply; 10+ messages in thread
From: Andre Przywara @ 2011-04-15 12:21 UTC (permalink / raw)
To: Keir Fraser; +Cc: xen-devel
[-- Attachment #1: Type: text/plain, Size: 240 bytes --]
Currently the handling of CR access intercepts differs considerably
between SVM and VMX. To allow future reuse, move the VMX implementation
into the generic HVM code and make VMX use the newly exported functions.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
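
For illustration only (not part of the patch): a minimal sketch of how a later
caller, such as the SVM CR-access handling added further in this series, might
reuse the newly exported helpers. The handler name and the bit layout of the
exit information are assumptions made up for the example; only hvm_mov_to_cr()
and hvm_mov_from_cr() come from this patch.

/*
 * Illustrative sketch only -- not part of this patch.  Shows how an
 * intercept handler could reuse the exported CR-move helpers.  The
 * function name and the 'exitinfo' layout are hypothetical.
 */
static int example_cr_access_intercept(struct cpu_user_regs *regs,
                                       unsigned long exitinfo,
                                       int cr, bool_t is_write)
{
    /* Assumed for the example: GPR number reported in the low 4 bits. */
    int gp = exitinfo & 0xf;

    if ( is_write )
        return hvm_mov_to_cr(gp, cr, regs); /* 0 means the domain was crashed */

    hvm_mov_from_cr(cr, gp, regs);
    return 1;
}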
[-- Attachment #2: da_1.patch --]
[-- Type: text/plain, Size: 12487 bytes --]
commit 48aec1e4591449768193b0221c7304f56a3dbe5c
Author: Andre Przywara <andre.przywara@amd.com>
Date: Sun Jul 25 13:56:25 2010 +0200
vmx/hvm: move mov-cr handling functions to generic HVM code
Currently the handling of CR access intercepts differs considerably
between SVM and VMX. To allow future reuse, move the VMX implementation
into the generic HVM code and make VMX use the newly exported functions.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index edeffe0..8cc6636 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1409,6 +1409,74 @@ static void hvm_set_uc_mode(struct vcpu *v, bool_t is_in_uc_mode)
return hvm_funcs.set_uc_mode(v);
}
+#ifdef __i386__
+#define GPRNAME(suffix) e ## suffix
+#else
+#define GPRNAME(suffix) r ## suffix
+#endif
+
+#define GPR_GET_CASE(num, name, val) case num: val = regs->GPRNAME(name);break
+#define GPR_SET_CASE(num, name, val) case num: regs->GPRNAME(name) = val;break
+
+#define GPR_CASES(GS, value) \
+ GPR_ ## GS ## ET_CASE(0, ax, value); \
+ GPR_ ## GS ## ET_CASE(1, cx, value); \
+ GPR_ ## GS ## ET_CASE(2, dx, value); \
+ GPR_ ## GS ## ET_CASE(3, bx, value); \
+ GPR_ ## GS ## ET_CASE(4, sp, value); \
+ GPR_ ## GS ## ET_CASE(5, bp, value); \
+ GPR_ ## GS ## ET_CASE(6, si, value); \
+ GPR_ ## GS ## ET_CASE(7, di, value);
+
+#define GPR_64_CASES(GS, value) \
+ GPR_ ## GS ## ET_CASE(8, 8, value); \
+ GPR_ ## GS ## ET_CASE(9, 9, value); \
+ GPR_ ## GS ## ET_CASE(10, 10, value); \
+ GPR_ ## GS ## ET_CASE(11, 11, value); \
+ GPR_ ## GS ## ET_CASE(12, 12, value); \
+ GPR_ ## GS ## ET_CASE(13, 13, value); \
+ GPR_ ## GS ## ET_CASE(14, 14, value); \
+ GPR_ ## GS ## ET_CASE(15, 15, value);
+
+int hvm_get_gpr(struct cpu_user_regs *regs, int gpr,
+ unsigned long *valueptr)
+{
+ uint64_t mask;
+ unsigned long value;
+ int width = 0;
+
+ if (width == 0) {
+ mask = (unsigned long)-1;
+ } else {
+ if (width >= 64)
+ mask = (uint64_t)-1;
+ else
+ mask = (1ULL << width) - 1;
+ }
+
+ switch(gpr) {
+ GPR_CASES(G, value)
+#ifndef __i386__
+ GPR_64_CASES(G, value)
+#endif
+ default: return 1;
+ }
+ *valueptr = value & mask;
+ return 0;
+}
+
+int hvm_set_gpr(struct cpu_user_regs *regs, int gpr, unsigned long value)
+{
+ switch(gpr) {
+ GPR_CASES(S, value)
+#ifndef __i386__
+ GPR_64_CASES(S, value)
+#endif
+ default: return 1;
+ }
+ return 0;
+}
+
int hvm_set_cr0(unsigned long value)
{
struct vcpu *v = current;
@@ -4082,6 +4150,85 @@ enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
return hvm_funcs.nhvm_intr_blocked(v);
}
+int hvm_mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
+{
+ unsigned long value;
+ struct vcpu *v = current;
+ struct vlapic *vlapic = vcpu_vlapic(v);
+
+ if (hvm_get_gpr(regs, gp, &value)) {
+ gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
+ goto exit_and_crash;
+ }
+
+ HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(value));
+
+ HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
+
+ switch ( cr )
+ {
+ case 0:
+ return !hvm_set_cr0(value);
+
+ case 3:
+ return !hvm_set_cr3(value);
+
+ case 4:
+ return !hvm_set_cr4(value);
+
+ case 8:
+ vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
+ break;
+
+ default:
+ gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
+ goto exit_and_crash;
+ }
+
+ return 1;
+
+ exit_and_crash:
+ domain_crash(v->domain);
+ return 0;
+}
+
+/*
+ * Read from control registers. CR0 and CR4 are handled before.
+ */
+void hvm_mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
+{
+ unsigned long value = 0;
+ struct vcpu *v = current;
+ struct vlapic *vlapic = vcpu_vlapic(v);
+
+ switch ( cr )
+ {
+ case 0:
+ case 2:
+ case 3:
+ case 4:
+ value = (unsigned long)v->arch.hvm_vcpu.guest_cr[cr];
+ break;
+ case 8:
+ value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
+ value = (value & 0xF0) >> 4;
+ break;
+ default:
+ gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
+ domain_crash(v->domain);
+ break;
+ }
+
+ if (hvm_set_gpr(regs, gp, value) != 0) {
+ printk("invalid gp: %d\n", gp);
+ domain_crash(v->domain);
+ }
+
+ HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(value));
+
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index cda6420..e6574b0 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1554,142 +1554,6 @@ static void vmx_invlpg_intercept(unsigned long vaddr)
vpid_sync_vcpu_gva(curr, vaddr);
}
-#define CASE_SET_REG(REG, reg) \
- case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: regs->reg = value; break
-#define CASE_GET_REG(REG, reg) \
- case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: value = regs->reg; break
-
-#define CASE_EXTEND_SET_REG \
- CASE_EXTEND_REG(S)
-#define CASE_EXTEND_GET_REG \
- CASE_EXTEND_REG(G)
-
-#ifdef __i386__
-#define CASE_EXTEND_REG(T)
-#else
-#define CASE_EXTEND_REG(T) \
- CASE_ ## T ## ET_REG(R8, r8); \
- CASE_ ## T ## ET_REG(R9, r9); \
- CASE_ ## T ## ET_REG(R10, r10); \
- CASE_ ## T ## ET_REG(R11, r11); \
- CASE_ ## T ## ET_REG(R12, r12); \
- CASE_ ## T ## ET_REG(R13, r13); \
- CASE_ ## T ## ET_REG(R14, r14); \
- CASE_ ## T ## ET_REG(R15, r15)
-#endif
-
-static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
-{
- unsigned long value;
- struct vcpu *v = current;
- struct vlapic *vlapic = vcpu_vlapic(v);
- int rc = 0;
- unsigned long old;
-
- switch ( gp )
- {
- CASE_GET_REG(EAX, eax);
- CASE_GET_REG(ECX, ecx);
- CASE_GET_REG(EDX, edx);
- CASE_GET_REG(EBX, ebx);
- CASE_GET_REG(EBP, ebp);
- CASE_GET_REG(ESI, esi);
- CASE_GET_REG(EDI, edi);
- CASE_GET_REG(ESP, esp);
- CASE_EXTEND_GET_REG;
- default:
- gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
- goto exit_and_crash;
- }
-
- HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(value));
-
- HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
-
- switch ( cr )
- {
- case 0:
- old = v->arch.hvm_vcpu.guest_cr[0];
- rc = !hvm_set_cr0(value);
- if (rc)
- hvm_memory_event_cr0(value, old);
- return rc;
-
- case 3:
- old = v->arch.hvm_vcpu.guest_cr[3];
- rc = !hvm_set_cr3(value);
- if (rc)
- hvm_memory_event_cr3(value, old);
- return rc;
-
- case 4:
- old = v->arch.hvm_vcpu.guest_cr[4];
- rc = !hvm_set_cr4(value);
- if (rc)
- hvm_memory_event_cr4(value, old);
- return rc;
-
- case 8:
- vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
- break;
-
- default:
- gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
- goto exit_and_crash;
- }
-
- return 1;
-
- exit_and_crash:
- domain_crash(v->domain);
- return 0;
-}
-
-/*
- * Read from control registers. CR0 and CR4 are read from the shadow.
- */
-static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
-{
- unsigned long value = 0;
- struct vcpu *v = current;
- struct vlapic *vlapic = vcpu_vlapic(v);
-
- switch ( cr )
- {
- case 3:
- value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
- break;
- case 8:
- value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
- value = (value & 0xF0) >> 4;
- break;
- default:
- gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
- domain_crash(v->domain);
- break;
- }
-
- switch ( gp ) {
- CASE_SET_REG(EAX, eax);
- CASE_SET_REG(ECX, ecx);
- CASE_SET_REG(EDX, edx);
- CASE_SET_REG(EBX, ebx);
- CASE_SET_REG(EBP, ebp);
- CASE_SET_REG(ESI, esi);
- CASE_SET_REG(EDI, edi);
- CASE_SET_REG(ESP, esp);
- CASE_EXTEND_SET_REG;
- default:
- printk("invalid gp: %d\n", gp);
- domain_crash(v->domain);
- break;
- }
-
- HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(value));
-
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
-}
-
static int vmx_cr_access(unsigned long exit_qualification,
struct cpu_user_regs *regs)
{
@@ -1702,11 +1566,11 @@ static int vmx_cr_access(unsigned long exit_qualification,
case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
- return mov_to_cr(gp, cr, regs);
+ return hvm_mov_to_cr(gp, cr, regs);
case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR:
gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
- mov_from_cr(cr, gp, regs);
+ hvm_mov_from_cr(cr, gp, regs);
break;
case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
{
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 12bd8a8..13f3a4f 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -311,6 +311,23 @@ static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs)
X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT | \
(xsave_enabled(_v) ? X86_CR4_OSXSAVE : 0))))
+#define HVM_GPR_NR_EAX 0
+#define HVM_GPR_NR_ECX 1
+#define HVM_GPR_NR_EDX 2
+#define HVM_GPR_NR_EBX 3
+#define HVM_GPR_NR_ESP 4
+#define HVM_GPR_NR_EBP 5
+#define HVM_GPR_NR_ESI 6
+#define HVM_GPR_NR_EDI 7
+#define HVM_GPR_NR_R8 8
+#define HVM_GPR_NR_R9 9
+#define HVM_GPR_NR_R10 10
+#define HVM_GPR_NR_R11 11
+#define HVM_GPR_NR_R12 12
+#define HVM_GPR_NR_R13 13
+#define HVM_GPR_NR_R14 14
+#define HVM_GPR_NR_R15 15
+
/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK ((1U << TRAP_machine_check) | (1U << TRAP_invalid_op))
@@ -389,6 +406,8 @@ int hvm_hap_nested_page_fault(unsigned long gpa,
int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t msr_content);
+int hvm_mov_to_cr(int gp, int cr, struct cpu_user_regs *regs);
+void hvm_mov_from_cr(int cr, int gp, struct cpu_user_regs *regs);
#ifdef __x86_64__
/* Called for current VCPU on crX changes by guest */
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 92e96e3..9a9e10f 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -129,6 +129,8 @@ void hvm_triple_fault(void);
void hvm_rdtsc_intercept(struct cpu_user_regs *regs);
int hvm_handle_xsetbv(u64 new_bv);
+int hvm_get_gpr(struct cpu_user_regs *regs, int gpr, unsigned long *valueptr);
+int hvm_set_gpr(struct cpu_user_regs *regs, int gpr, unsigned long value);
/* These functions all return X86EMUL return codes. */
int hvm_set_efer(uint64_t value);
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 8e685e4..dcadc0e 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -153,22 +153,6 @@ void vmx_update_cpu_exec_control(struct vcpu *v);
#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR (1 << 4)
#define VMX_CONTROL_REG_ACCESS_TYPE_CLTS (2 << 4)
#define VMX_CONTROL_REG_ACCESS_TYPE_LMSW (3 << 4)
-#define VMX_CONTROL_REG_ACCESS_GPR_EAX (0 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ECX (1 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EDX (2 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EBX (3 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ESP (4 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EBP (5 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ESI (6 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EDI (7 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R8 (8 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R9 (9 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R10 (10 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R11 (11 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R12 (12 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R13 (13 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R14 (14 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R15 (15 << 8)
/*
* Access Rights
* [PATCH 3/5] svm: implement instruction fetch part of DecodeAssist
2011-04-15 12:16 [PATCH 0/5] svm: implement new DecodeAssist feature Andre Przywara
2011-04-15 12:21 ` [PATCH 1/5] vmx/hvm: move mov-cr handling functions to generic HVM code Andre Przywara
2011-04-15 12:21 ` [PATCH 2/5] svm: add bit definitions for SVM DecodeAssist Andre Przywara
@ 2011-04-15 12:22 ` Andre Przywara
2011-04-18 9:10 ` Keir Fraser
2011-04-18 12:37 ` Keir Fraser
2011-04-15 12:23 ` [PATCH 4/5] svm: implement CR access " Andre Przywara
2011-04-15 12:24 ` [PATCH 5/5] svm: implement INVLPG " Andre Przywara
4 siblings, 2 replies; 10+ messages in thread
From: Andre Przywara @ 2011-04-15 12:22 UTC (permalink / raw)
To: Keir Fraser; +Cc: xen-devel
[-- Attachment #1: Type: text/plain, Size: 500 bytes --]
Newer SVM implementations (Bulldozer) copy up to 15 bytes from the
instruction stream into the VMCB when a #PF or #NPF exception is
intercepted. This patch makes use of this information if available.
This saves us from a) traversing the guest's page tables, b) mapping
the guest's memory and c) copying the instructions from there into the
hypervisor's address space.
This speeds up #NPF intercepts quite a lot and avoids cache and TLB
thrashing.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
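
Before the diff, a condensed restatement of the fetch decision this patch adds
to hvm_emulate_one(): prefer the bytes the hardware has already stashed in the
VMCB, and only fall back to the existing translate-map-copy path when none are
available. Field names follow the patch; the standalone helper itself is just
a sketch and does not exist in the code.

/*
 * Sketch of the fetch decision introduced in hvm_emulate_one() below;
 * this helper is not part of the patch, it only condenses the logic.
 */
static unsigned int fetch_guest_insn(struct vcpu *curr,
                                     struct hvm_emulate_ctxt *ctxt)
{
    unsigned int len = curr->arch.hvm_vcpu.guest_ins_len;

    if ( len != 0 )
    {
        /* Fast path: the CPU copied up to 15 bytes into the VMCB already. */
        memcpy(ctxt->insn_buf, curr->arch.hvm_vcpu.guest_ins, len);
        curr->arch.hvm_vcpu.guest_ins_len = 0; /* valid for one emulation only */
        return len;
    }

    /*
     * Slow path (unchanged): translate CS:EIP and fetch from guest memory
     * via hvm_fetch_from_guest_virt_nofault(); 0 tells the caller to do so.
     */
    return 0;
}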
[-- Attachment #2: da_3.patch --]
[-- Type: text/plain, Size: 4984 bytes --]
commit 242318e2ae4229ba3ac6a5253f3f4722133348a6
Author: Andre Przywara <andre.przywara@amd.com>
Date: Sun Jul 25 14:33:21 2010 +0200
svm: implement instruction fetch part of DecodeAssist
Newer SVM implementations (Bulldozer) copy up to 15 bytes from the
instruction stream into the VMCB when a #PF or #NPF exception is
intercepted. This patch makes use of this information if available.
This saves us from a) traversing the guest's page tables, b) mapping
the guest's memory and c) copying the instructions from there into the
hypervisor's address space.
This speeds up #NPF intercepts quite a lot and avoids cache and TLB
thrashing.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index af903c9..7074d83 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -995,15 +995,22 @@ int hvm_emulate_one(
pfec |= PFEC_user_mode;
hvmemul_ctxt->insn_buf_eip = regs->eip;
- hvmemul_ctxt->insn_buf_bytes =
- (hvm_virtual_to_linear_addr(
- x86_seg_cs, &hvmemul_ctxt->seg_reg[x86_seg_cs],
- regs->eip, sizeof(hvmemul_ctxt->insn_buf),
- hvm_access_insn_fetch, hvmemul_ctxt->ctxt.addr_size, &addr) &&
- !hvm_fetch_from_guest_virt_nofault(
- hvmemul_ctxt->insn_buf, addr,
- sizeof(hvmemul_ctxt->insn_buf), pfec))
- ? sizeof(hvmemul_ctxt->insn_buf) : 0;
+ if (curr->arch.hvm_vcpu.guest_ins_len == 0) {
+ hvmemul_ctxt->insn_buf_bytes =
+ (hvm_virtual_to_linear_addr(
+ x86_seg_cs, &hvmemul_ctxt->seg_reg[x86_seg_cs],
+ regs->eip, sizeof(hvmemul_ctxt->insn_buf),
+ hvm_access_insn_fetch, hvmemul_ctxt->ctxt.addr_size, &addr) &&
+ !hvm_fetch_from_guest_virt_nofault(
+ hvmemul_ctxt->insn_buf, addr,
+ sizeof(hvmemul_ctxt->insn_buf), pfec))
+ ? sizeof(hvmemul_ctxt->insn_buf) : 0;
+ } else {
+ hvmemul_ctxt->insn_buf_bytes = curr->arch.hvm_vcpu.guest_ins_len;
+ memcpy(hvmemul_ctxt->insn_buf, curr->arch.hvm_vcpu.guest_ins,
+ hvmemul_ctxt->insn_buf_bytes);
+ curr->arch.hvm_vcpu.guest_ins_len = 0;
+ }
hvmemul_ctxt->exn_pending = 0;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 279220a..861c2c7 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1589,6 +1589,18 @@ static void svm_invlpg_intercept(unsigned long vaddr)
svm_asid_g_invlpg(curr, vaddr);
}
+static void svm_set_instruction_bytes(struct vmcb_struct *vmcb)
+{
+ struct vcpu *curr = current;
+ int len = vmcb->guest_ins_len & 0x0F;
+
+ if (len == 0)
+ return;
+ curr->arch.hvm_vcpu.guest_ins_len = len;
+ curr->arch.hvm_vcpu.guest_ins = vmcb->guest_ins;
+ return;
+}
+
static struct hvm_function_table __read_mostly svm_function_table = {
.name = "SVM",
.cpu_up_prepare = svm_cpu_up_prepare,
@@ -1801,6 +1813,7 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
unsigned long va;
va = vmcb->exitinfo2;
regs->error_code = vmcb->exitinfo1;
+ svm_set_instruction_bytes(vmcb);
HVM_DBG_LOG(DBG_LEVEL_VMMU,
"eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
(unsigned long)regs->eax, (unsigned long)regs->ebx,
@@ -1809,6 +1822,7 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
if ( paging_fault(va, regs) )
{
+ v->arch.hvm_vcpu.guest_ins_len = 0;
if ( trace_will_trace_event(TRC_SHADOW) )
break;
if ( hvm_long_mode_enabled(v) )
@@ -1817,6 +1831,7 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
HVMTRACE_2D(PF_XEN, regs->error_code, va);
break;
}
+ v->arch.hvm_vcpu.guest_ins_len = 0;
hvm_inject_exception(TRAP_page_fault, regs->error_code, va);
break;
@@ -1968,6 +1983,7 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
case VMEXIT_NPF:
perfc_incra(svmexits, VMEXIT_NPF_PERFC);
regs->error_code = vmcb->exitinfo1;
+ svm_set_instruction_bytes(vmcb);
svm_do_nested_pgfault(v, regs, vmcb->exitinfo2);
break;
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index eabecaa..741bee8 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -140,6 +140,9 @@ struct hvm_vcpu {
unsigned long mmio_gva;
unsigned long mmio_gpfn;
+ uint8_t guest_ins_len;
+ void *guest_ins;
+
/* Callback into x86_emulate when emulating FPU/MMX/XMM instructions. */
void (*fpu_exception_callback)(void *, struct cpu_user_regs *);
void *fpu_exception_callback_arg;