From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
Paul Durrant <paul.durrant@citrix.com>,
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Subject: [PATCH v2 1/3] x86/hvm: Don't raise #GP behind the emulator's back for MSR accesses
Date: Mon, 20 Feb 2017 10:28:47 +0000
Message-ID: <1487586529-27092-2-git-send-email-andrew.cooper3@citrix.com>
In-Reply-To: <1487586529-27092-1-git-send-email-andrew.cooper3@citrix.com>

The current hvm_msr_{read,write}_intercept() infrastructure calls
hvm_inject_hw_exception() directly to latch a fault, and returns
X86EMUL_EXCEPTION to its caller.

This behaviour is problematic for the hvmemul_{read,write}_msr() paths, as
the fault is raised behind the back of the x86 emulator.

Alter the behaviour so hvm_msr_{read,write}_intercept() simply returns
X86EMUL_EXCEPTION, leaving the callers to actually inject the #GP fault.
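
To illustrate, every caller now follows the pattern below (a minimal
sketch rather than code taken from the patch; 'msr' and 'val' are
stand-in locals, everything else is as in the diff):

    uint64_t val;

    switch ( hvm_msr_read_intercept(msr, &val) )
    {
    case X86EMUL_OKAY:
        /* 'val' now holds the MSR value. */
        break;

    case X86EMUL_EXCEPTION:
        /* The fault is no longer latched internally; inject it here. */
        hvm_inject_hw_exception(TRAP_gp_fault, 0);
        break;
    }
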
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
CC: Paul Durrant <paul.durrant@citrix.com>
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
v2:
* Substantial rebase
* Introduce __must_check for hvm_msr_{read,write}_intercept()
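
  (For context: __must_check expands to the compiler's warn_unused_result
  attribute, so a caller which discards the return value, e.g. a
  hypothetical

      hvm_msr_read_intercept(msr, &val);  /* warning: ignoring return value */

  triggers a build warning, mechanically flagging callers not yet
  converted to the new convention.)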
---
 xen/arch/x86/hvm/emulate.c        | 14 ++++++++++++--
 xen/arch/x86/hvm/hvm.c            |  7 ++++---
 xen/arch/x86/hvm/svm/svm.c        |  4 ++--
 xen/arch/x86/hvm/vmx/vmx.c        | 23 ++++++++++++++++++-----
 xen/arch/x86/hvm/vmx/vvmx.c       | 19 ++++++++++++++-----
 xen/include/asm-x86/hvm/support.h | 12 +++++++++---
 6 files changed, 59 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 14f9b43..edcae5e 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1544,7 +1544,12 @@ static int hvmemul_read_msr(
uint64_t *val,
struct x86_emulate_ctxt *ctxt)
{
- return hvm_msr_read_intercept(reg, val);
+ int rc = hvm_msr_read_intercept(reg, val);
+
+ if ( rc == X86EMUL_EXCEPTION )
+ x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+
+ return rc;
}
static int hvmemul_write_msr(
@@ -1552,7 +1557,12 @@ static int hvmemul_write_msr(
uint64_t val,
struct x86_emulate_ctxt *ctxt)
{
- return hvm_msr_write_intercept(reg, val, 1);
+ int rc = hvm_msr_write_intercept(reg, val, 1);
+
+ if ( rc == X86EMUL_EXCEPTION )
+ x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+
+ return rc;
}
static int hvmemul_wbinvd(
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6621d62..08855c2 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -518,7 +518,10 @@ void hvm_do_resume(struct vcpu *v)
if ( w->do_write.msr )
{
- hvm_msr_write_intercept(w->msr, w->value, 0);
+ if ( hvm_msr_write_intercept(w->msr, w->value, 0) ==
+ X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
w->do_write.msr = 0;
}
@@ -3455,7 +3458,6 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
return ret;
gp_fault:
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
ret = X86EMUL_EXCEPTION;
*msr_content = -1ull;
goto out;
@@ -3600,7 +3602,6 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
return ret;
gp_fault:
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 894c457..b864535 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1744,7 +1744,6 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
return X86EMUL_OKAY;
gpf:
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
@@ -1897,7 +1896,6 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
return result;
gpf:
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
@@ -1924,6 +1922,8 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
if ( rc == X86EMUL_OKAY )
__update_guest_eip(regs, inst_len);
+ else if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
}
static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 597d7ac..b5bfa05 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2734,7 +2734,6 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
return X86EMUL_OKAY;
gp_fault:
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
@@ -2971,7 +2970,6 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
return X86EMUL_OKAY;
gp_fault:
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
@@ -3664,18 +3662,33 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
break;
case EXIT_REASON_MSR_READ:
{
- uint64_t msr_content;
- if ( hvm_msr_read_intercept(regs->_ecx, &msr_content) == X86EMUL_OKAY )
+ uint64_t msr_content = 0;
+
+ switch ( hvm_msr_read_intercept(regs->_ecx, &msr_content) )
{
+ case X86EMUL_OKAY:
msr_split(regs, msr_content);
update_guest_eip(); /* Safe: RDMSR */
+ break;
+
+ case X86EMUL_EXCEPTION:
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ break;
}
break;
}
case EXIT_REASON_MSR_WRITE:
- if ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) == X86EMUL_OKAY )
+ switch ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) )
+ {
+ case X86EMUL_OKAY:
update_guest_eip(); /* Safe: WRMSR */
+ break;
+
+ case X86EMUL_EXCEPTION:
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ break;
+ }
break;
case EXIT_REASON_VMXOFF:
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index f6a25a6..c830d16 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1032,6 +1032,7 @@ static void load_shadow_guest_state(struct vcpu *v)
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
u32 control;
u64 cr_gh_mask, cr_read_shadow;
+ int rc;
static const u16 vmentry_fields[] = {
VM_ENTRY_INTR_INFO,
@@ -1053,8 +1054,12 @@ static void load_shadow_guest_state(struct vcpu *v)
if ( control & VM_ENTRY_LOAD_GUEST_PAT )
hvm_set_guest_pat(v, get_vvmcs(v, GUEST_PAT));
if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
- hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
- get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+ {
+ rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+ get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ }
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
@@ -1222,7 +1227,7 @@ static void sync_vvmcs_ro(struct vcpu *v)
static void load_vvmcs_host_state(struct vcpu *v)
{
- int i;
+ int i, rc;
u64 r;
u32 control;
@@ -1240,8 +1245,12 @@ static void load_vvmcs_host_state(struct vcpu *v)
if ( control & VM_EXIT_LOAD_HOST_PAT )
hvm_set_guest_pat(v, get_vvmcs(v, HOST_PAT));
if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
- hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
- get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+ {
+ rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+ get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ }
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 262955d..5e25698 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -121,13 +121,19 @@ int hvm_set_efer(uint64_t value);
int hvm_set_cr0(unsigned long value, bool_t may_defer);
int hvm_set_cr3(unsigned long value, bool_t may_defer);
int hvm_set_cr4(unsigned long value, bool_t may_defer);
-int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
-int hvm_msr_write_intercept(
- unsigned int msr, uint64_t msr_content, bool_t may_defer);
int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
void hvm_ud_intercept(struct cpu_user_regs *);
+/*
+ * May return X86EMUL_EXCEPTION, at which point the caller is responsible for
+ * injecting a #GP fault. Used to support speculative reads.
+ */
+int __must_check hvm_msr_read_intercept(
+ unsigned int msr, uint64_t *msr_content);
+int __must_check hvm_msr_write_intercept(
+ unsigned int msr, uint64_t msr_content, bool_t may_defer);
+
#endif /* __ASM_X86_HVM_SUPPORT_H__ */
/*
--
2.1.4