From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: [PATCH v3 19/24] x86/hvm: Extend the hvm_copy_*() API with a pagefault_info pointer
Date: Wed, 30 Nov 2016 13:50:36 +0000
Message-ID: <1480513841-7565-20-git-send-email-andrew.cooper3@citrix.com>
In-Reply-To: <1480513841-7565-1-git-send-email-andrew.cooper3@citrix.com>
The hvm_copy_to/from_guest_virt() and hvm_fetch_from_guest_virt() functions
gain a pagefault_info pointer, which __hvm_copy() fills with the faulting
linear address and error code should a pagefault occur during the copy.

No functional change.
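
As an illustration only (not introduced by this patch; buf, vaddr and size
are placeholders), a caller wanting the details of a failed translation can
now do:

    pagefault_info_t pfinfo;
    enum hvm_copy_result rc;

    rc = hvm_copy_from_guest_virt(buf, vaddr, size, 0, &pfinfo);
    if ( rc == HVMCOPY_bad_gva_to_gfn )
    {
        /*
         * pfinfo.linear and pfinfo.ec identify the faulting linear
         * address and error code.  In this patch, __hvm_copy() still
         * queues the #PF for injection itself; the recorded information
         * is intended for later patches in this series, which hand the
         * fault to the emulation layer instead.
         */
    }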
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
---
xen/arch/x86/hvm/emulate.c | 8 ++++---
xen/arch/x86/hvm/hvm.c | 49 +++++++++++++++++++++++++--------------
xen/arch/x86/hvm/vmx/vvmx.c | 9 ++++---
xen/arch/x86/mm/shadow/common.c | 5 ++--
xen/include/asm-x86/hvm/support.h | 23 +++++++++++++-----
5 files changed, 63 insertions(+), 31 deletions(-)
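
(Reviewer note: pfinfo is written only on the HVMCOPY_bad_gva_to_gfn path,
and only when the caller passes a non-NULL pointer.  The *_phys() and
*_nofault() variants pass NULL, so their behaviour is unchanged.)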
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 614e182..41f689e 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -770,6 +770,7 @@ static int __hvmemul_read(
struct hvm_emulate_ctxt *hvmemul_ctxt)
{
struct vcpu *curr = current;
+ pagefault_info_t pfinfo;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present;
struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
@@ -790,8 +791,8 @@ static int __hvmemul_read(
pfec |= PFEC_user_mode;
rc = ((access_type == hvm_access_insn_fetch) ?
- hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec) :
- hvm_copy_from_guest_virt(p_data, addr, bytes, pfec));
+ hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo) :
+ hvm_copy_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo));
switch ( rc )
{
@@ -878,6 +879,7 @@ static int hvmemul_write(
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
struct vcpu *curr = current;
+ pagefault_info_t pfinfo;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
@@ -896,7 +898,7 @@ static int hvmemul_write(
(hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
pfec |= PFEC_user_mode;
- rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec);
+ rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec, &pfinfo);
switch ( rc )
{
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index bdfd94e..390f76d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2859,6 +2859,7 @@ void hvm_task_switch(
struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
bool_t otd_writable, ntd_writable;
unsigned long eflags;
+ pagefault_info_t pfinfo;
int exn_raised, rc;
struct {
u16 back_link,__blh;
@@ -2925,7 +2926,7 @@ void hvm_task_switch(
}
rc = hvm_copy_from_guest_virt(
- &tss, prev_tr.base, sizeof(tss), PFEC_page_present);
+ &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
if ( rc != HVMCOPY_okay )
goto out;
@@ -2963,12 +2964,12 @@ void hvm_task_switch(
&tss.eip,
offsetof(typeof(tss), trace) -
offsetof(typeof(tss), eip),
- PFEC_page_present);
+ PFEC_page_present, &pfinfo);
if ( rc != HVMCOPY_okay )
goto out;
rc = hvm_copy_from_guest_virt(
- &tss, tr.base, sizeof(tss), PFEC_page_present);
+ &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
/*
* Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
* functions knew we want RO access.
@@ -3008,7 +3009,8 @@ void hvm_task_switch(
tss.back_link = prev_tr.sel;
rc = hvm_copy_to_guest_virt(tr.base + offsetof(typeof(tss), back_link),
- &tss.back_link, sizeof(tss.back_link), 0);
+ &tss.back_link, sizeof(tss.back_link), 0,
+ &pfinfo);
if ( rc == HVMCOPY_bad_gva_to_gfn )
exn_raised = 1;
else if ( rc != HVMCOPY_okay )
@@ -3045,7 +3047,8 @@ void hvm_task_switch(
16 << segr.attr.fields.db,
&linear_addr) )
{
- rc = hvm_copy_to_guest_virt(linear_addr, &errcode, opsz, 0);
+ rc = hvm_copy_to_guest_virt(linear_addr, &errcode, opsz, 0,
+ &pfinfo);
if ( rc == HVMCOPY_bad_gva_to_gfn )
exn_raised = 1;
else if ( rc != HVMCOPY_okay )
@@ -3068,7 +3071,8 @@ void hvm_task_switch(
#define HVMCOPY_phys (0u<<2)
#define HVMCOPY_virt (1u<<2)
static enum hvm_copy_result __hvm_copy(
- void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec)
+ void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec,
+ pagefault_info_t *pfinfo)
{
struct vcpu *curr = current;
unsigned long gfn;
@@ -3109,7 +3113,15 @@ static enum hvm_copy_result __hvm_copy(
if ( pfec & PFEC_page_shared )
return HVMCOPY_gfn_shared;
if ( flags & HVMCOPY_fault )
+ {
+ if ( pfinfo )
+ {
+ pfinfo->linear = addr;
+ pfinfo->ec = pfec;
+ }
+
hvm_inject_page_fault(pfec, addr);
+ }
return HVMCOPY_bad_gva_to_gfn;
}
gpa |= (paddr_t)gfn << PAGE_SHIFT;
@@ -3279,7 +3291,7 @@ enum hvm_copy_result hvm_copy_to_guest_phys(
{
return __hvm_copy(buf, paddr, size,
HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_phys,
- 0);
+ 0, NULL);
}
enum hvm_copy_result hvm_copy_from_guest_phys(
@@ -3287,31 +3299,34 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
{
return __hvm_copy(buf, paddr, size,
HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_phys,
- 0);
+ 0, NULL);
}
enum hvm_copy_result hvm_copy_to_guest_virt(
- unsigned long vaddr, void *buf, int size, uint32_t pfec)
+ unsigned long vaddr, void *buf, int size, uint32_t pfec,
+ pagefault_info_t *pfinfo)
{
return __hvm_copy(buf, vaddr, size,
HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_virt,
- PFEC_page_present | PFEC_write_access | pfec);
+ PFEC_page_present | PFEC_write_access | pfec, pfinfo);
}
enum hvm_copy_result hvm_copy_from_guest_virt(
- void *buf, unsigned long vaddr, int size, uint32_t pfec)
+ void *buf, unsigned long vaddr, int size, uint32_t pfec,
+ pagefault_info_t *pfinfo)
{
return __hvm_copy(buf, vaddr, size,
HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
- PFEC_page_present | pfec);
+ PFEC_page_present | pfec, pfinfo);
}
enum hvm_copy_result hvm_fetch_from_guest_virt(
- void *buf, unsigned long vaddr, int size, uint32_t pfec)
+ void *buf, unsigned long vaddr, int size, uint32_t pfec,
+ pagefault_info_t *pfinfo)
{
return __hvm_copy(buf, vaddr, size,
HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
- PFEC_page_present | PFEC_insn_fetch | pfec);
+ PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
}
enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
@@ -3319,7 +3334,7 @@ enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
{
return __hvm_copy(buf, vaddr, size,
HVMCOPY_to_guest | HVMCOPY_no_fault | HVMCOPY_virt,
- PFEC_page_present | PFEC_write_access | pfec);
+ PFEC_page_present | PFEC_write_access | pfec, NULL);
}
enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
@@ -3327,7 +3342,7 @@ enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
{
return __hvm_copy(buf, vaddr, size,
HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
- PFEC_page_present | pfec);
+ PFEC_page_present | pfec, NULL);
}
enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
@@ -3335,7 +3350,7 @@ enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
{
return __hvm_copy(buf, vaddr, size,
HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
- PFEC_page_present | PFEC_insn_fetch | pfec);
+ PFEC_page_present | PFEC_insn_fetch | pfec, NULL);
}
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index bcc4a97..7342d12 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -396,6 +396,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
struct vcpu *v = current;
union vmx_inst_info info;
struct segment_register seg;
+ pagefault_info_t pfinfo;
unsigned long base, index, seg_base, disp, offset;
int scale, size;
@@ -451,7 +452,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
goto gp_fault;
if ( poperandS != NULL &&
- hvm_copy_from_guest_virt(poperandS, base, size, 0)
+ hvm_copy_from_guest_virt(poperandS, base, size, 0, &pfinfo)
!= HVMCOPY_okay )
return X86EMUL_EXCEPTION;
decode->mem = base;
@@ -1611,6 +1612,7 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
struct vcpu *v = current;
struct vmx_inst_decoded decode;
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+ pagefault_info_t pfinfo;
unsigned long gpa = 0;
int rc;
@@ -1620,7 +1622,7 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
gpa = nvcpu->nv_vvmcxaddr;
- rc = hvm_copy_to_guest_virt(decode.mem, &gpa, decode.len, 0);
+ rc = hvm_copy_to_guest_virt(decode.mem, &gpa, decode.len, 0, &pfinfo);
if ( rc != HVMCOPY_okay )
return X86EMUL_EXCEPTION;
@@ -1679,6 +1681,7 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
{
struct vcpu *v = current;
struct vmx_inst_decoded decode;
+ pagefault_info_t pfinfo;
u64 value = 0;
int rc;
@@ -1690,7 +1693,7 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
switch ( decode.type ) {
case VMX_INST_MEMREG_TYPE_MEMORY:
- rc = hvm_copy_to_guest_virt(decode.mem, &value, decode.len, 0);
+ rc = hvm_copy_to_guest_virt(decode.mem, &value, decode.len, 0, &pfinfo);
if ( rc != HVMCOPY_okay )
return X86EMUL_EXCEPTION;
break;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index e509cc1..e8501ce 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -179,6 +179,7 @@ hvm_read(enum x86_segment seg,
enum hvm_access_type access_type,
struct sh_emulate_ctxt *sh_ctxt)
{
+ pagefault_info_t pfinfo;
unsigned long addr;
int rc;
@@ -188,9 +189,9 @@ hvm_read(enum x86_segment seg,
return rc;
if ( access_type == hvm_access_insn_fetch )
- rc = hvm_fetch_from_guest_virt(p_data, addr, bytes, 0);
+ rc = hvm_fetch_from_guest_virt(p_data, addr, bytes, 0, &pfinfo);
else
- rc = hvm_copy_from_guest_virt(p_data, addr, bytes, 0);
+ rc = hvm_copy_from_guest_virt(p_data, addr, bytes, 0, &pfinfo);
switch ( rc )
{
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 9938450..4aa5a36 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -83,16 +83,27 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
* HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
* ordinary machine memory.
* HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
- * mapping to a guest physical address. In this case
- * a page fault exception is automatically queued
- * for injection into the current HVM VCPU.
+ * mapping to a guest physical address. The
+ * pagefault_info_t structure will be filled in if
+ * provided, and a page fault exception is
+ * automatically queued for injection into the
+ * current HVM VCPU.
*/
+typedef struct pagefault_info
+{
+ unsigned long linear;
+ int ec;
+} pagefault_info_t;
+
enum hvm_copy_result hvm_copy_to_guest_virt(
- unsigned long vaddr, void *buf, int size, uint32_t pfec);
+ unsigned long vaddr, void *buf, int size, uint32_t pfec,
+ pagefault_info_t *pfinfo);
enum hvm_copy_result hvm_copy_from_guest_virt(
- void *buf, unsigned long vaddr, int size, uint32_t pfec);
+ void *buf, unsigned long vaddr, int size, uint32_t pfec,
+ pagefault_info_t *pfinfo);
enum hvm_copy_result hvm_fetch_from_guest_virt(
- void *buf, unsigned long vaddr, int size, uint32_t pfec);
+ void *buf, unsigned long vaddr, int size, uint32_t pfec,
+ pagefault_info_t *pfinfo);
/*
* As above (copy to/from a guest virtual address), but no fault is generated
--
2.1.4