From: Mukesh Rathor <mukesh.rathor@oracle.com>
To: Xen-devel@lists.xensource.com
Subject: [V11 PATCH 20/21] PVH xen: introduce vmexit handler for PVH
Date: Thu, 22 Aug 2013 18:19:09 -0700 [thread overview]
Message-ID: <1377220750-19514-21-git-send-email-mukesh.rathor@oracle.com> (raw)
In-Reply-To: <1377220750-19514-1-git-send-email-mukesh.rathor@oracle.com>
This patch contains vmx exit handler for PVH guest. Note it contains
a macro dbgp1 to print vmexit reasons and a lot of other data
to go with it. It can be enabled by setting pvhdbg to 1. This can be
very useful for debugging during the first few months of testing, after
which it can be removed at the maintainer's discretion.
Changes in V2:
- Move non VMX generic code to arch/x86/hvm/pvh.c
- Remove get_gpr_ptr() and use existing decode_register() instead.
- Defer call to pvh vmx exit handler until interrupts are enabled. So the
caller vmx_pvh_vmexit_handler() handles the NMI/EXT-INT/TRIPLE_FAULT now.
- Fix the CPUID (wrongly) clearing bit 24. No need to do this now, set
the correct feature bits in CR4 during vmcs creation.
- Fix few hard tabs.
Changes in V3:
- Lot of cleanup and rework in PVH vm exit handler.
- add parameter to emulate_forced_invalid_op().
Changes in V5:
- Move pvh.c and emulate_forced_invalid_op related changes to another patch.
- Formatting.
- Remove vmx_pvh_read_descriptor().
- Use SS DPL instead of CS.RPL for CPL.
- Remove pvh_user_cpuid() and call pv_cpuid for user mode also.
Changes in V6:
- Replace domain_crash_synchronous() with domain_crash().
Changes in V7:
- Don't read all selectors on every vmexit. Do that only for the
IO instruction vmexit.
- Add a couple of checks and set guest_cr[4] in access_cr4().
- Add period after all comments in case that's an issue.
- Move making pv_cpuid and emulate_privileged_op public here.
Changes in V8:
- Mainly, don't read selectors on vmexit. The macros now come to VMCS
to read selectors on demand.
Changes in V11:
- merge this with previous patch "prep changes".
- allow invalid op emulation for kernel mode also.
- Use CR0_READ_SHADOW instead of GUEST_CR0.
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Acked-by: Keir Fraser <keir@xen.org>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
PV-HVM-Regression-Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
xen/arch/x86/hvm/hvm.c | 3 +-
xen/arch/x86/hvm/vmx/pvh.c | 468 +++++++++++++++++++++++++++++++++++++
xen/arch/x86/hvm/vmx/vmx.c | 6 +
xen/arch/x86/traps.c | 6 +-
xen/include/asm-x86/hvm/vmx/vmx.h | 1 +
xen/include/asm-x86/processor.h | 2 +
xen/include/asm-x86/traps.h | 3 +
7 files changed, 485 insertions(+), 4 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4769420..372c2db 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3038,7 +3038,8 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
- hvm_memory_event_msr(msr, msr_content);
+ if ( !is_pvh_vcpu(v) )
+ hvm_memory_event_msr(msr, msr_content);
switch ( msr )
{
diff --git a/xen/arch/x86/hvm/vmx/pvh.c b/xen/arch/x86/hvm/vmx/pvh.c
index 526ce2b..23e7d17 100644
--- a/xen/arch/x86/hvm/vmx/pvh.c
+++ b/xen/arch/x86/hvm/vmx/pvh.c
@@ -20,6 +20,474 @@
#include <asm/hvm/nestedhvm.h>
#include <asm/xstate.h>
+#ifndef NDEBUG
+static int pvhdbg = 0;
+#define dbgp1(...) do { (pvhdbg == 1) ? printk(__VA_ARGS__) : 0; } while ( 0 )
+#else
+#define dbgp1(...) ((void)0)
+#endif
+
+/* Returns : 0 == msr read successfully. */
+static int vmxit_msr_read(struct cpu_user_regs *regs)
+{
+ u64 msr_content = 0;
+
+ switch ( regs->ecx )
+ {
+ case MSR_IA32_MISC_ENABLE:
+ rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
+ msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
+ MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
+ break;
+
+ default:
+ /* PVH fixme: see hvm_msr_read_intercept(). */
+ rdmsrl(regs->ecx, msr_content);
+ break;
+ }
+ regs->eax = (uint32_t)msr_content;
+ regs->edx = (uint32_t)(msr_content >> 32);
+ vmx_update_guest_eip();
+
+ dbgp1("msr read c:%#lx a:%#lx d:%#lx RIP:%#lx RSP:%#lx\n", regs->ecx,
+ regs->eax, regs->edx, regs->rip, regs->rsp);
+
+ return 0;
+}
+
+/* Returns : 0 == msr written successfully. */
+static int vmxit_msr_write(struct cpu_user_regs *regs)
+{
+ uint64_t msr_content = regs->eax | (regs->edx << 32);
+
+ dbgp1("PVH: msr write:%#lx. eax:%#lx edx:%#lx\n", regs->ecx,
+ regs->eax, regs->edx);
+
+ if ( hvm_msr_write_intercept(regs->ecx, msr_content) == X86EMUL_OKAY )
+ {
+ vmx_update_guest_eip();
+ return 0;
+ }
+ return 1;
+}
+
+static int vmxit_debug(struct cpu_user_regs *regs)
+{
+ struct vcpu *vp = current;
+ unsigned long exit_qualification = __vmread(EXIT_QUALIFICATION);
+
+ write_debugreg(6, exit_qualification | 0xffff0ff0);
+
+ /* gdbsx or another debugger. Never pause dom0. */
+ if ( vp->domain->domain_id != 0 && vp->domain->debugger_attached )
+ domain_pause_for_debugger();
+ else
+ hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
+
+ return 0;
+}
+
+/* Returns: rc == 0: handled the MTF vmexit. */
+static int vmxit_mtf(struct cpu_user_regs *regs)
+{
+ struct vcpu *vp = current;
+ int rc = -EINVAL, ss = vp->arch.hvm_vcpu.single_step;
+
+ vp->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vp->arch.hvm_vmx.exec_control);
+ vp->arch.hvm_vcpu.single_step = 0;
+
+ if ( vp->domain->debugger_attached && ss )
+ {
+ domain_pause_for_debugger();
+ rc = 0;
+ }
+ return rc;
+}
+
+static int vmxit_int3(struct cpu_user_regs *regs)
+{
+ int ilen = vmx_get_instruction_length();
+ struct vcpu *vp = current;
+ struct hvm_trap trap_info = {
+ .vector = TRAP_int3,
+ .type = X86_EVENTTYPE_SW_EXCEPTION,
+ .error_code = HVM_DELIVER_NO_ERROR_CODE,
+ .insn_len = ilen
+ };
+
+ /* gdbsx or another debugger. Never pause dom0. */
+ if ( vp->domain->domain_id != 0 && vp->domain->debugger_attached )
+ {
+ regs->eip += ilen;
+ dbgp1("[%d]PVH: domain pause for debugger\n", smp_processor_id());
+ current->arch.gdbsx_vcpu_event = TRAP_int3;
+ domain_pause_for_debugger();
+ return 0;
+ }
+ hvm_inject_trap(&trap_info);
+
+ return 0;
+}
+
+/*
+ * Just like HVM, PVH should be using "cpuid" from the kernel mode.
+ * While it's allowed, it's deprecated.
+ */
+static int vmxit_invalid_op(struct cpu_user_regs *regs)
+{
+ if ( !emulate_forced_invalid_op(regs) )
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+
+ return 0;
+}
+
+/* Returns: rc == 0: handled the exception. */
+static int vmxit_exception(struct cpu_user_regs *regs)
+{
+ int vector = (__vmread(VM_EXIT_INTR_INFO)) & INTR_INFO_VECTOR_MASK;
+ int rc = -ENOSYS;
+
+ dbgp1(" EXCPT: vec:%#x cs:%#lx rip:%#lx\n", vector,
+ __vmread(GUEST_CS_SELECTOR), regs->eip);
+
+ switch ( vector )
+ {
+ case TRAP_debug:
+ rc = vmxit_debug(regs);
+ break;
+
+ case TRAP_int3:
+ rc = vmxit_int3(regs);
+ break;
+
+ case TRAP_invalid_op:
+ rc = vmxit_invalid_op(regs);
+ break;
+
+ case TRAP_no_device:
+ hvm_funcs.fpu_dirty_intercept();
+ rc = 0;
+ break;
+
+ default:
+ printk(XENLOG_G_WARNING
+ "PVH: Unhandled trap:%#x RIP:%#lx\n", vector, regs->eip);
+ }
+ return rc;
+}
+
+static int vmxit_vmcall(struct cpu_user_regs *regs)
+{
+ if ( hvm_do_hypercall(regs) != HVM_HCALL_preempted )
+ vmx_update_guest_eip();
+ return 0;
+}
+
+/* Returns: rc == 0: success. */
+static int access_cr0(struct cpu_user_regs *regs, uint acc_typ, uint64_t *regp)
+{
+ struct vcpu *vp = current;
+
+ if ( acc_typ == VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR )
+ {
+ unsigned long new_cr0 = *regp;
+ unsigned long old_cr0 = __vmread(GUEST_CR0);
+
+ dbgp1("PVH:writing to CR0. RIP:%#lx val:%#lx\n", regs->rip, *regp);
+ if ( (u32)new_cr0 != new_cr0 )
+ {
+ printk(XENLOG_G_WARNING
+ "Guest setting upper 32 bits in CR0: %#lx", new_cr0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ return 0;
+ }
+
+ new_cr0 &= ~HVM_CR0_GUEST_RESERVED_BITS;
+ /* ET is reserved and should always be 1. */
+ new_cr0 |= X86_CR0_ET;
+
+ /* A pvh is not expected to change to real mode. */
+ if ( (new_cr0 & (X86_CR0_PE | X86_CR0_PG)) !=
+ (X86_CR0_PG | X86_CR0_PE) )
+ {
+ printk(XENLOG_G_WARNING
+ "PVH attempting to turn off PE/PG. CR0:%#lx\n", new_cr0);
+ return -EPERM;
+ }
+ /* TS going from 1 to 0 */
+ if ( (old_cr0 & X86_CR0_TS) && ((new_cr0 & X86_CR0_TS) == 0) )
+ vmx_fpu_enter(vp);
+
+ vp->arch.hvm_vcpu.hw_cr[0] = vp->arch.hvm_vcpu.guest_cr[0] = new_cr0;
+ __vmwrite(GUEST_CR0, new_cr0);
+ __vmwrite(CR0_READ_SHADOW, new_cr0);
+ }
+ else
+ *regp = __vmread(CR0_READ_SHADOW);
+
+ return 0;
+}
+
+/* Returns: rc == 0: success. */
+static int access_cr4(struct cpu_user_regs *regs, uint acc_typ, uint64_t *regp)
+{
+ if ( acc_typ == VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR )
+ {
+ struct vcpu *vp = current;
+ u64 old_val = __vmread(GUEST_CR4);
+ u64 new = *regp;
+
+ if ( new & HVM_CR4_GUEST_RESERVED_BITS(vp) )
+ {
+ printk(XENLOG_G_WARNING
+ "PVH guest attempts to set reserved bit in CR4: %#lx", new);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ return 0;
+ }
+
+ if ( !(new & X86_CR4_PAE) && hvm_long_mode_enabled(vp) )
+ {
+ printk(XENLOG_G_WARNING "Guest cleared CR4.PAE while "
+ "EFER.LMA is set");
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ return 0;
+ }
+
+ /* hw_cr[4] is not used for PVH anywhere. */
+ vp->arch.hvm_vcpu.guest_cr[4] = new;
+
+ if ( (old_val ^ new) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
+ vpid_sync_all();
+
+ __vmwrite(CR4_READ_SHADOW, new);
+
+ new |= X86_CR4_VMXE | X86_CR4_MCE;
+ __vmwrite(GUEST_CR4, new);
+ }
+ else
+ *regp = __vmread(CR4_READ_SHADOW);
+
+ return 0;
+}
+
+/* Returns: rc == 0: success, else -errno. */
+static int vmxit_cr_access(struct cpu_user_regs *regs)
+{
+ unsigned long exit_qualification = __vmread(EXIT_QUALIFICATION);
+ uint acc_typ = VMX_CONTROL_REG_ACCESS_TYPE(exit_qualification);
+ int cr, rc = -EINVAL;
+
+ switch ( acc_typ )
+ {
+ case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
+ case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR:
+ {
+ uint gpr = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
+ uint64_t *regp = decode_register(gpr, regs, 0);
+ cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
+
+ if ( regp == NULL )
+ break;
+
+ switch ( cr )
+ {
+ case 0:
+ rc = access_cr0(regs, acc_typ, regp);
+ break;
+
+ case 3:
+ printk(XENLOG_G_ERR "PVH: unexpected cr3 vmexit. rip:%#lx\n",
+ regs->rip);
+ domain_crash(current->domain);
+ break;
+
+ case 4:
+ rc = access_cr4(regs, acc_typ, regp);
+ break;
+ }
+ if ( rc == 0 )
+ vmx_update_guest_eip();
+ break;
+ }
+
+ case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
+ {
+ struct vcpu *vp = current;
+ unsigned long cr0 = vp->arch.hvm_vcpu.guest_cr[0] & ~X86_CR0_TS;
+ vp->arch.hvm_vcpu.hw_cr[0] = vp->arch.hvm_vcpu.guest_cr[0] = cr0;
+
+ vmx_fpu_enter(vp);
+ __vmwrite(GUEST_CR0, cr0);
+ __vmwrite(CR0_READ_SHADOW, cr0);
+ vmx_update_guest_eip();
+ rc = 0;
+ break;
+ }
+
+ case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
+ {
+ uint64_t value = current->arch.hvm_vcpu.guest_cr[0];
+ /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
+ value = (value & ~0xe) | ((exit_qualification >> 16) & 0xf);
+ rc = access_cr0(regs, VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR, &value);
+ break;
+ }
+ }
+ return rc;
+}
+
+/*
+ * Note: A PVH guest sets IOPL natively by setting bits in the eflags, and not
+ * via hypercalls used by a PV.
+ */
+static int vmxit_io_instr(struct cpu_user_regs *regs)
+{
+ struct segment_register seg;
+ int requested = (regs->rflags & X86_EFLAGS_IOPL) >> 12;
+ int curr_lvl = (regs->rflags & X86_EFLAGS_VM) ? 3 : 0;
+
+ if ( curr_lvl == 0 )
+ {
+ hvm_get_segment_register(current, x86_seg_ss, &seg);
+ curr_lvl = seg.attr.fields.dpl;
+ }
+ if ( requested >= curr_lvl && emulate_privileged_op(regs) )
+ return 0;
+
+ hvm_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ return 0;
+}
+
+static int pvh_ept_handle_violation(unsigned long qualification,
+ paddr_t gpa, struct cpu_user_regs *regs)
+{
+ unsigned long gla, gfn = gpa >> PAGE_SHIFT;
+ p2m_type_t p2mt;
+ mfn_t mfn = get_gfn_query_unlocked(current->domain, gfn, &p2mt);
+
+ printk(XENLOG_G_ERR "EPT violation %#lx (%c%c%c/%c%c%c), "
+ "gpa %#"PRIpaddr", mfn %#lx, type %i. RIP:%#lx RSP:%#lx\n",
+ qualification,
+ (qualification & EPT_READ_VIOLATION) ? 'r' : '-',
+ (qualification & EPT_WRITE_VIOLATION) ? 'w' : '-',
+ (qualification & EPT_EXEC_VIOLATION) ? 'x' : '-',
+ (qualification & EPT_EFFECTIVE_READ) ? 'r' : '-',
+ (qualification & EPT_EFFECTIVE_WRITE) ? 'w' : '-',
+ (qualification & EPT_EFFECTIVE_EXEC) ? 'x' : '-',
+ gpa, mfn_x(mfn), p2mt, regs->rip, regs->rsp);
+
+ ept_walk_table(current->domain, gfn);
+
+ if ( qualification & EPT_GLA_VALID )
+ {
+ gla = __vmread(GUEST_LINEAR_ADDRESS);
+ printk(XENLOG_G_ERR " --- GLA %#lx\n", gla);
+ }
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ return 0;
+}
+
+/*
+ * Main vm exit handler for PVH. Called from vmx_vmexit_handler().
+ * Note: vmx_asm_vmexit_handler updates rip/rsp/eflags in the regs{} struct.
+ */
+void vmx_pvh_vmexit_handler(struct cpu_user_regs *regs)
+{
+ unsigned long exit_qualification;
+ unsigned int exit_reason = __vmread(VM_EXIT_REASON);
+ int rc=0, ccpu = smp_processor_id();
+ struct vcpu *v = current;
+
+ dbgp1("PVH:[%d]left VMCS exitreas:%d RIP:%#lx RSP:%#lx EFLAGS:%#lx "
+ "CR0:%#lx\n", ccpu, exit_reason, regs->rip, regs->rsp, regs->rflags,
+ __vmread(GUEST_CR0));
+
+ switch ( (uint16_t)exit_reason )
+ {
+ /* NMI and machine_check are handled by the caller, we handle rest here */
+ case EXIT_REASON_EXCEPTION_NMI: /* 0 */
+ rc = vmxit_exception(regs);
+ break;
+
+ case EXIT_REASON_EXTERNAL_INTERRUPT: /* 1 */
+ break; /* handled in vmx_vmexit_handler() */
+
+ case EXIT_REASON_PENDING_VIRT_INTR: /* 7 */
+ /* Disable the interrupt window. */
+ v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+ break;
+
+ case EXIT_REASON_CPUID: /* 10 */
+ pv_cpuid(regs);
+ vmx_update_guest_eip();
+ break;
+
+ case EXIT_REASON_HLT: /* 12 */
+ vmx_update_guest_eip();
+ hvm_hlt(regs->eflags);
+ break;
+
+ case EXIT_REASON_VMCALL: /* 18 */
+ rc = vmxit_vmcall(regs);
+ break;
+
+ case EXIT_REASON_CR_ACCESS: /* 28 */
+ rc = vmxit_cr_access(regs);
+ break;
+
+ case EXIT_REASON_DR_ACCESS: /* 29 */
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
+ vmx_dr_access(exit_qualification, regs);
+ break;
+
+ case EXIT_REASON_IO_INSTRUCTION: /* 30 */
+ vmxit_io_instr(regs);
+ break;
+
+ case EXIT_REASON_MSR_READ: /* 31 */
+ rc = vmxit_msr_read(regs);
+ break;
+
+ case EXIT_REASON_MSR_WRITE: /* 32 */
+ rc = vmxit_msr_write(regs);
+ break;
+
+ case EXIT_REASON_MONITOR_TRAP_FLAG: /* 37 */
+ rc = vmxit_mtf(regs);
+ break;
+
+ case EXIT_REASON_MCE_DURING_VMENTRY: /* 41 */
+ break; /* handled in vmx_vmexit_handler() */
+
+ case EXIT_REASON_EPT_VIOLATION: /* 48 */
+ {
+ paddr_t gpa = __vmread(GUEST_PHYSICAL_ADDRESS);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
+ rc = pvh_ept_handle_violation(exit_qualification, gpa, regs);
+ break;
+ }
+
+ default:
+ rc = 1;
+ printk(XENLOG_G_ERR
+ "PVH: Unexpected exit reason:%#x\n", exit_reason);
+ }
+
+ if ( rc )
+ {
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
+ printk(XENLOG_G_WARNING
+ "PVH: [%d] exit_reas:%d %#x qual:%ld %#lx cr0:%#016lx\n",
+ ccpu, exit_reason, exit_reason, exit_qualification,
+ exit_qualification, __vmread(GUEST_CR0));
+ printk(XENLOG_G_WARNING "PVH: RIP:%#lx RSP:%#lx EFLGS:%#lx CR3:%#lx\n",
+ regs->rip, regs->rsp, regs->rflags, __vmread(GUEST_CR3));
+ domain_crash(v->domain);
+ }
+}
+
/*
* Set vmcs fields during boot of a vcpu. Called from arch_set_info_guest.
*
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 93775dd..54d0d14 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2474,6 +2474,12 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
return vmx_failed_vmentry(exit_reason, regs);
+ if ( is_pvh_vcpu(v) )
+ {
+ vmx_pvh_vmexit_handler(regs);
+ return;
+ }
+
if ( v->arch.hvm_vmx.vmx_realmode )
{
/* Put RFLAGS back the way the guest wants it */
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 2a3f517..c0aafb4 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -746,7 +746,7 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
return 1;
}
-static void pv_cpuid(struct cpu_user_regs *regs)
+void pv_cpuid(struct cpu_user_regs *regs)
{
uint32_t a, b, c, d;
@@ -923,7 +923,7 @@ static int emulate_invalid_rdtscp(struct cpu_user_regs *regs)
return EXCRET_fault_fixed;
}
-static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
+int emulate_forced_invalid_op(struct cpu_user_regs *regs)
{
char sig[5], instr[2];
unsigned long eip, rc;
@@ -1909,7 +1909,7 @@ static int is_cpufreq_controller(struct domain *d)
#include "x86_64/mmconfig.h"
-static int emulate_privileged_op(struct cpu_user_regs *regs)
+int emulate_privileged_op(struct cpu_user_regs *regs)
{
enum x86_segment which_sel;
struct vcpu *v = current;
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 3ad2188..78fb7fb 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -475,6 +475,7 @@ void vmx_dr_access(unsigned long exit_qualification,
void vmx_fpu_enter(struct vcpu *v);
int vmx_pvh_vcpu_boot_set_info(struct vcpu *v,
struct vcpu_guest_context *ctxtp);
+void vmx_pvh_vmexit_handler(struct cpu_user_regs *regs);
int alloc_p2m_hap_data(struct p2m_domain *p2m);
void free_p2m_hap_data(struct p2m_domain *p2m);
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 5cdacc7..22a9653 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -566,6 +566,8 @@ void microcode_set_module(unsigned int);
int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void), unsigned long len);
int microcode_resume_cpu(int cpu);
+void pv_cpuid(struct cpu_user_regs *regs);
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_X86_PROCESSOR_H */
diff --git a/xen/include/asm-x86/traps.h b/xen/include/asm-x86/traps.h
index 82cbcee..20c9151 100644
--- a/xen/include/asm-x86/traps.h
+++ b/xen/include/asm-x86/traps.h
@@ -49,4 +49,7 @@ extern int guest_has_trap_callback(struct domain *d, uint16_t vcpuid,
extern int send_guest_trap(struct domain *d, uint16_t vcpuid,
unsigned int trap_nr);
+int emulate_privileged_op(struct cpu_user_regs *regs);
+int emulate_forced_invalid_op(struct cpu_user_regs *regs);
+
#endif /* ASM_TRAP_H */
--
1.7.2.3
next prev parent reply other threads:[~2013-08-23 1:19 UTC|newest]
Thread overview: 49+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-08-23 1:18 [V11 PATCH 00/21]PVH xen: Phase I, Version 11 patches Mukesh Rathor
2013-08-23 1:18 ` [V11 PATCH 01/21] PVH xen: Add readme docs/misc/pvh-readme.txt Mukesh Rathor
2013-08-23 1:18 ` [V11 PATCH 02/21] PVH xen: add params to read_segment_register Mukesh Rathor
2013-08-23 1:18 ` [V11 PATCH 03/21] PVH xen: Move e820 fields out of pv_domain struct Mukesh Rathor
2013-08-23 1:18 ` [V11 PATCH 04/21] PVH xen: hvm related preparatory changes for PVH Mukesh Rathor
2013-08-23 1:18 ` [V11 PATCH 05/21] PVH xen: vmx " Mukesh Rathor
2013-08-23 1:18 ` [V11 PATCH 06/21] PVH xen: vmcs " Mukesh Rathor
2013-08-23 1:18 ` [V11 PATCH 07/21] PVH xen: Introduce PVH guest type and some basic changes Mukesh Rathor
2013-08-23 1:18 ` [V11 PATCH 08/21] PVH xen: introduce pvh_vcpu_boot_set_info() and vmx_pvh_vcpu_boot_set_info() Mukesh Rathor
2013-08-23 1:18 ` [V11 PATCH 09/21] PVH xen: domain create, context switch related code changes Mukesh Rathor
2013-08-23 8:12 ` Jan Beulich
2013-08-23 1:18 ` [V11 PATCH 10/21] PVH xen: support invalid op emulation for PVH Mukesh Rathor
2013-08-23 1:19 ` [V11 PATCH 11/21] PVH xen: Support privileged " Mukesh Rathor
2013-08-23 1:19 ` [V11 PATCH 12/21] PVH xen: interrupt/event-channel delivery to PVH Mukesh Rathor
2013-08-23 1:19 ` [V11 PATCH 13/21] PVH xen: additional changes to support PVH guest creation and execution Mukesh Rathor
2013-08-23 1:19 ` [V11 PATCH 14/21] PVH xen: mapcache and show registers Mukesh Rathor
2013-08-23 1:19 ` [V11 PATCH 15/21] PVH xen: mtrr, tsc, timers, grant changes Mukesh Rathor
2013-08-23 1:19 ` [V11 PATCH 16/21] PVH xen: add hypercall support for PVH Mukesh Rathor
2013-08-23 1:19 ` [V11 PATCH 17/21] PVH xen: vmcs related changes Mukesh Rathor
2013-08-23 8:41 ` Jan Beulich
2013-08-24 0:26 ` Mukesh Rathor
2013-08-26 8:15 ` Jan Beulich
2013-08-27 17:00 ` George Dunlap
2013-08-27 22:43 ` Mukesh Rathor
2013-08-23 1:19 ` [V11 PATCH 18/21] PVH xen: HVM support of PVH guest creation/destruction Mukesh Rathor
2013-08-23 1:19 ` [V11 PATCH 19/21] PVH xen: VMX " Mukesh Rathor
2013-08-23 9:14 ` Jan Beulich
2013-08-24 0:27 ` Mukesh Rathor
2013-08-23 1:19 ` Mukesh Rathor [this message]
2013-08-23 9:12 ` [V11 PATCH 20/21] PVH xen: introduce vmexit handler for PVH Jan Beulich
2013-08-24 0:35 ` Mukesh Rathor
2013-08-26 8:22 ` Jan Beulich
2013-08-23 1:19 ` [V11 PATCH 21/21] PVH xen: Checks, asserts, and limitations " Mukesh Rathor
2013-08-23 8:49 ` [V11 PATCH 00/21]PVH xen: Phase I, Version 11 patches Jan Beulich
2013-08-23 11:15 ` George Dunlap
2013-08-23 12:05 ` Jan Beulich
2013-08-24 0:40 ` Mukesh Rathor
2013-08-27 17:05 ` George Dunlap
2013-08-27 19:18 ` Mukesh Rathor
2013-08-28 11:20 ` George Dunlap
2013-08-29 0:16 ` Mukesh Rathor
2013-08-28 0:37 ` Mukesh Rathor
2013-08-29 16:28 ` George Dunlap
2013-08-30 0:25 ` Mukesh Rathor
2013-08-30 11:02 ` George Dunlap
2013-08-30 17:21 ` George Dunlap
2013-08-30 21:22 ` Mukesh Rathor
2013-09-02 14:52 ` George Dunlap
2013-09-06 1:07 ` Mukesh Rathor
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1377220750-19514-21-git-send-email-mukesh.rathor@oracle.com \
--to=mukesh.rathor@oracle.com \
--cc=Xen-devel@lists.xensource.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).