From: Andre Przywara <andre.przywara@amd.com>
To: Keir Fraser <keir.fraser@eu.citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Subject: [PATCH 1/5] vmx/hvm: move mov-cr handling functions to generic HVM code
Date: Fri, 15 Apr 2011 14:21:17 +0200 [thread overview]
Message-ID: <4DA8383D.2050800@amd.com> (raw)
In-Reply-To: <4DA83719.5080106@amd.com>
[-- Attachment #1: Type: text/plain, Size: 240 bytes --]
Currently the handling of CR access intercepts is done very
differently in SVM and VMX. For future use, move the VMX part
into the generic HVM path and use the exported functions.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
[-- Attachment #2: da_1.patch --]
[-- Type: text/plain, Size: 12487 bytes --]
commit 48aec1e4591449768193b0221c7304f56a3dbe5c
Author: Andre Przywara <andre.przywara@amd.com>
Date: Sun Jul 25 13:56:25 2010 +0200
vmx/hvm: move mov-cr handling functions to generic HVM code
Currently the handling of CR access intercepts is done very
differently in SVM and VMX. For future use, move the VMX part
into the generic HVM path and use the exported functions.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index edeffe0..8cc6636 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1409,6 +1409,74 @@ static void hvm_set_uc_mode(struct vcpu *v, bool_t is_in_uc_mode)
return hvm_funcs.set_uc_mode(v);
}
+/*
+ * Helpers for accessing general purpose registers in struct cpu_user_regs
+ * by architectural register number (0 = [er]ax ... 7 = [er]di, 8-15 = r8-r15).
+ */
+#ifdef __i386__
+/* 32-bit build: fields are named eax, ecx, ... */
+#define GPRNAME(suffix) e ## suffix
+#else
+/* 64-bit build: fields are named rax, rcx, ..., r8-r15. */
+#define GPRNAME(suffix) r ## suffix
+#endif
+
+/* One switch arm reading/writing regs->{e,r}<name> for register number <num>. */
+#define GPR_GET_CASE(num, name, val) case num: val = regs->GPRNAME(name);break
+#define GPR_SET_CASE(num, name, val) case num: regs->GPRNAME(name) = val;break
+
+/* The eight legacy GPR arms; GS is G (get) or S (set). */
+#define GPR_CASES(GS, value) \
+    GPR_ ## GS ## ET_CASE(0, ax, value); \
+    GPR_ ## GS ## ET_CASE(1, cx, value); \
+    GPR_ ## GS ## ET_CASE(2, dx, value); \
+    GPR_ ## GS ## ET_CASE(3, bx, value); \
+    GPR_ ## GS ## ET_CASE(4, sp, value); \
+    GPR_ ## GS ## ET_CASE(5, bp, value); \
+    GPR_ ## GS ## ET_CASE(6, si, value); \
+    GPR_ ## GS ## ET_CASE(7, di, value);
+
+/* The eight x86-64-only arms for r8-r15. */
+#define GPR_64_CASES(GS, value) \
+    GPR_ ## GS ## ET_CASE(8, 8, value); \
+    GPR_ ## GS ## ET_CASE(9, 9, value); \
+    GPR_ ## GS ## ET_CASE(10, 10, value); \
+    GPR_ ## GS ## ET_CASE(11, 11, value); \
+    GPR_ ## GS ## ET_CASE(12, 12, value); \
+    GPR_ ## GS ## ET_CASE(13, 13, value); \
+    GPR_ ## GS ## ET_CASE(14, 14, value); \
+    GPR_ ## GS ## ET_CASE(15, 15, value);
+
+/*
+ * Read general purpose register number <gpr> (architectural encoding,
+ * 0 = [er]ax ... 15 = r15) from <regs> into *valueptr.
+ * Returns 0 on success, 1 if <gpr> does not name a valid register
+ * (r8-r15 are invalid on 32-bit builds).
+ */
+int hvm_get_gpr(struct cpu_user_regs *regs, int gpr,
+                unsigned long *valueptr)
+{
+    unsigned long value;
+
+    /*
+     * The posted version computed a width-based mask here, but the local
+     * "width" was unconditionally 0, making the whole mask computation
+     * dead code (mask always all-ones). Dropped.
+     */
+    switch(gpr) {
+    GPR_CASES(G, value)
+#ifndef __i386__
+    GPR_64_CASES(G, value)
+#endif
+    default: return 1;
+    }
+    *valueptr = value;
+    return 0;
+}
+
+/*
+ * Write <value> into general purpose register number <gpr> (architectural
+ * encoding, 0 = [er]ax ... 15 = r15) of <regs>.
+ * Returns 0 on success, 1 if <gpr> does not name a valid register
+ * (r8-r15 are invalid on 32-bit builds).
+ */
+int hvm_set_gpr(struct cpu_user_regs *regs, int gpr, unsigned long value)
+{
+    switch(gpr) {
+    GPR_CASES(S, value)
+#ifndef __i386__
+    GPR_64_CASES(S, value)
+#endif
+    default: return 1;
+    }
+    return 0;
+}
+
int hvm_set_cr0(unsigned long value)
{
struct vcpu *v = current;
@@ -4082,6 +4150,85 @@ enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
return hvm_funcs.nhvm_intr_blocked(v);
}
+/*
+ * Emulate a guest "mov to CRn": read GPR number <gp> from <regs> and write
+ * it to control register <cr> of the current vcpu. CR8 writes update the
+ * virtual LAPIC TPR (bits 7:4 of APIC_TASKPRI).
+ * Returns 1 on success, 0 on failure (the domain is crashed on an invalid
+ * GPR or CR number).
+ */
+int hvm_mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
+{
+    unsigned long value, old;
+    int rc;
+    struct vcpu *v = current;
+    struct vlapic *vlapic = vcpu_vlapic(v);
+
+    if (hvm_get_gpr(regs, gp, &value)) {
+        gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
+        goto exit_and_crash;
+    }
+
+    HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(value));
+
+    HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
+
+    switch ( cr )
+    {
+    /*
+     * Keep the mem_event notifications the VMX implementation being
+     * replaced used to send; dropping them in the move was a regression.
+     */
+    case 0:
+        old = v->arch.hvm_vcpu.guest_cr[0];
+        rc = !hvm_set_cr0(value);
+        if (rc)
+            hvm_memory_event_cr0(value, old);
+        return rc;
+
+    case 3:
+        old = v->arch.hvm_vcpu.guest_cr[3];
+        rc = !hvm_set_cr3(value);
+        if (rc)
+            hvm_memory_event_cr3(value, old);
+        return rc;
+
+    case 4:
+        old = v->arch.hvm_vcpu.guest_cr[4];
+        rc = !hvm_set_cr4(value);
+        if (rc)
+            hvm_memory_event_cr4(value, old);
+        return rc;
+
+    case 8:
+        vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
+        break;
+
+    default:
+        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
+        goto exit_and_crash;
+    }
+
+    return 1;
+
+ exit_and_crash:
+    domain_crash(v->domain);
+    return 0;
+}
+
+/*
+ * Emulate a guest "mov from CRn": read control register <cr> of the current
+ * vcpu and store the value into GPR number <gp> of <regs>. CR0-CR4 are
+ * returned from the cached guest_cr[] values; CR8 is synthesized from the
+ * virtual LAPIC TPR. The domain is crashed on an invalid CR or GPR number.
+ */
+void hvm_mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
+{
+    unsigned long value = 0;
+    struct vcpu *v = current;
+    struct vlapic *vlapic = vcpu_vlapic(v);
+
+    switch ( cr )
+    {
+    case 0:
+    case 2:
+    case 3:
+    case 4:
+        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[cr];
+        break;
+    case 8:
+        /* TPR lives in bits 7:4 of APIC_TASKPRI. */
+        value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
+        value = (value & 0xF0) >> 4;
+        break;
+    default:
+        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
+        domain_crash(v->domain);
+        /* Do not write a bogus value into the guest GPR nor trace it. */
+        return;
+    }
+
+    /* gdprintk for consistency with hvm_mov_to_cr() (was bare printk). */
+    if (hvm_set_gpr(regs, gp, value) != 0) {
+        gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
+        domain_crash(v->domain);
+        return;
+    }
+
+    HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(value));
+
+    HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index cda6420..e6574b0 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1554,142 +1554,6 @@ static void vmx_invlpg_intercept(unsigned long vaddr)
vpid_sync_vcpu_gva(curr, vaddr);
}
-#define CASE_SET_REG(REG, reg) \
- case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: regs->reg = value; break
-#define CASE_GET_REG(REG, reg) \
- case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: value = regs->reg; break
-
-#define CASE_EXTEND_SET_REG \
- CASE_EXTEND_REG(S)
-#define CASE_EXTEND_GET_REG \
- CASE_EXTEND_REG(G)
-
-#ifdef __i386__
-#define CASE_EXTEND_REG(T)
-#else
-#define CASE_EXTEND_REG(T) \
- CASE_ ## T ## ET_REG(R8, r8); \
- CASE_ ## T ## ET_REG(R9, r9); \
- CASE_ ## T ## ET_REG(R10, r10); \
- CASE_ ## T ## ET_REG(R11, r11); \
- CASE_ ## T ## ET_REG(R12, r12); \
- CASE_ ## T ## ET_REG(R13, r13); \
- CASE_ ## T ## ET_REG(R14, r14); \
- CASE_ ## T ## ET_REG(R15, r15)
-#endif
-
-static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
-{
- unsigned long value;
- struct vcpu *v = current;
- struct vlapic *vlapic = vcpu_vlapic(v);
- int rc = 0;
- unsigned long old;
-
- switch ( gp )
- {
- CASE_GET_REG(EAX, eax);
- CASE_GET_REG(ECX, ecx);
- CASE_GET_REG(EDX, edx);
- CASE_GET_REG(EBX, ebx);
- CASE_GET_REG(EBP, ebp);
- CASE_GET_REG(ESI, esi);
- CASE_GET_REG(EDI, edi);
- CASE_GET_REG(ESP, esp);
- CASE_EXTEND_GET_REG;
- default:
- gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
- goto exit_and_crash;
- }
-
- HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(value));
-
- HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
-
- switch ( cr )
- {
- case 0:
- old = v->arch.hvm_vcpu.guest_cr[0];
- rc = !hvm_set_cr0(value);
- if (rc)
- hvm_memory_event_cr0(value, old);
- return rc;
-
- case 3:
- old = v->arch.hvm_vcpu.guest_cr[3];
- rc = !hvm_set_cr3(value);
- if (rc)
- hvm_memory_event_cr3(value, old);
- return rc;
-
- case 4:
- old = v->arch.hvm_vcpu.guest_cr[4];
- rc = !hvm_set_cr4(value);
- if (rc)
- hvm_memory_event_cr4(value, old);
- return rc;
-
- case 8:
- vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
- break;
-
- default:
- gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
- goto exit_and_crash;
- }
-
- return 1;
-
- exit_and_crash:
- domain_crash(v->domain);
- return 0;
-}
-
-/*
- * Read from control registers. CR0 and CR4 are read from the shadow.
- */
-static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
-{
- unsigned long value = 0;
- struct vcpu *v = current;
- struct vlapic *vlapic = vcpu_vlapic(v);
-
- switch ( cr )
- {
- case 3:
- value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
- break;
- case 8:
- value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
- value = (value & 0xF0) >> 4;
- break;
- default:
- gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
- domain_crash(v->domain);
- break;
- }
-
- switch ( gp ) {
- CASE_SET_REG(EAX, eax);
- CASE_SET_REG(ECX, ecx);
- CASE_SET_REG(EDX, edx);
- CASE_SET_REG(EBX, ebx);
- CASE_SET_REG(EBP, ebp);
- CASE_SET_REG(ESI, esi);
- CASE_SET_REG(EDI, edi);
- CASE_SET_REG(ESP, esp);
- CASE_EXTEND_SET_REG;
- default:
- printk("invalid gp: %d\n", gp);
- domain_crash(v->domain);
- break;
- }
-
- HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(value));
-
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
-}
-
static int vmx_cr_access(unsigned long exit_qualification,
struct cpu_user_regs *regs)
{
@@ -1702,11 +1566,11 @@ static int vmx_cr_access(unsigned long exit_qualification,
case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
- return mov_to_cr(gp, cr, regs);
+ return hvm_mov_to_cr(gp, cr, regs);
case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR:
gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
- mov_from_cr(cr, gp, regs);
+ hvm_mov_from_cr(cr, gp, regs);
break;
case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
{
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 12bd8a8..13f3a4f 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -311,6 +311,23 @@ static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs)
X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT | \
(xsave_enabled(_v) ? X86_CR4_OSXSAVE : 0))))
+/*
+ * Architectural GPR numbers as accepted by hvm_get_gpr()/hvm_set_gpr().
+ * NOTE(review): nothing in this patch references these constants —
+ * presumably later patches in the series use them; otherwise drop them.
+ */
+#define HVM_GPR_NR_EAX 0
+#define HVM_GPR_NR_ECX 1
+#define HVM_GPR_NR_EDX 2
+#define HVM_GPR_NR_EBX 3
+#define HVM_GPR_NR_ESP 4
+#define HVM_GPR_NR_EBP 5
+#define HVM_GPR_NR_ESI 6
+#define HVM_GPR_NR_EDI 7
+#define HVM_GPR_NR_R8 8
+#define HVM_GPR_NR_R9 9
+#define HVM_GPR_NR_R10 10
+#define HVM_GPR_NR_R11 11
+#define HVM_GPR_NR_R12 12
+#define HVM_GPR_NR_R13 13
+#define HVM_GPR_NR_R14 14
+#define HVM_GPR_NR_R15 15
+
/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK ((1U << TRAP_machine_check) | (1U << TRAP_invalid_op))
@@ -389,6 +406,8 @@ int hvm_hap_nested_page_fault(unsigned long gpa,
int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t msr_content);
+int hvm_mov_to_cr(int gp, int cr, struct cpu_user_regs *regs);
+void hvm_mov_from_cr(int cr, int gp, struct cpu_user_regs *regs);
#ifdef __x86_64__
/* Called for current VCPU on crX changes by guest */
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 92e96e3..9a9e10f 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -129,6 +129,8 @@ void hvm_triple_fault(void);
void hvm_rdtsc_intercept(struct cpu_user_regs *regs);
int hvm_handle_xsetbv(u64 new_bv);
+int hvm_get_gpr(struct cpu_user_regs *regs, int gpr, unsigned long *valueptr);
+int hvm_set_gpr(struct cpu_user_regs *regs, int gpr, unsigned long value);
/* These functions all return X86EMUL return codes. */
int hvm_set_efer(uint64_t value);
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 8e685e4..dcadc0e 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -153,22 +153,6 @@ void vmx_update_cpu_exec_control(struct vcpu *v);
#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR (1 << 4)
#define VMX_CONTROL_REG_ACCESS_TYPE_CLTS (2 << 4)
#define VMX_CONTROL_REG_ACCESS_TYPE_LMSW (3 << 4)
-#define VMX_CONTROL_REG_ACCESS_GPR_EAX (0 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ECX (1 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EDX (2 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EBX (3 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ESP (4 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EBP (5 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ESI (6 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EDI (7 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R8 (8 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R9 (9 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R10 (10 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R11 (11 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R12 (12 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R13 (13 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R14 (14 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R15 (15 << 8)
/*
* Access Rights
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
next prev parent reply other threads:[~2011-04-15 12:21 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2011-04-15 12:16 [PATCH 0/5] svm: implement new DecodeAssist feature Andre Przywara
2011-04-15 12:21 ` Andre Przywara [this message]
2011-04-18 8:48 ` [PATCH 1/5] vmx/hvm: move mov-cr handling functions to generic HVM code Keir Fraser
2011-04-15 12:21 ` [PATCH 2/5] svm: add bit definitions for SVM DecodeAssist Andre Przywara
2011-04-15 12:22 ` [PATCH 3/5] svm: implement instruction fetch part of DecodeAssist Andre Przywara
2011-04-18 9:10 ` Keir Fraser
2011-04-18 12:37 ` Keir Fraser
2011-04-15 12:23 ` [PATCH 4/5] svm: implement CR access " Andre Przywara
2011-04-15 12:24 ` [PATCH 5/5] svm: implement INVLPG " Andre Przywara
2011-04-18 9:07 ` Keir Fraser
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4DA8383D.2050800@amd.com \
--to=andre.przywara@amd.com \
--cc=keir.fraser@eu.citrix.com \
--cc=xen-devel@lists.xensource.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).