From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Paul Durrant <paul.durrant@citrix.com>
Subject: [PATCH v2 16/19] x86/hvm: Rename hvm_copy_*_guest_virt() to hvm_copy_*_guest_linear()
Date: Mon, 28 Nov 2016 11:13:33 +0000
Message-ID: <1480331616-6165-17-git-send-email-andrew.cooper3@citrix.com>
In-Reply-To: <1480331616-6165-1-git-send-email-andrew.cooper3@citrix.com>
The functions take linear addresses, not virtual addresses: they perform no
segmentation themselves, so any segment:offset resolution must already have
been done by the caller. (Lots of other code in Xen makes this mistake.)
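As a purely illustrative sketch (not part of this patch, with error handling
elided), a caller applies segmentation first, e.g. via the
hvm_translate_linear_addr() helper used by the shadow code touched below, and
only then hands the resulting linear address to the renamed copy routines:

  /*
   * Hypothetical usage, assuming the shadow emulation context (sh_ctxt)
   * of xen/arch/x86/mm/shadow/common.c and a caller-supplied segment
   * offset 'offset'.  Segmentation (base/limit/attribute checks) turns
   * seg:offset into a linear address; the copy helpers consume only
   * that linear address.
   */
  unsigned long linear;
  pagefault_info_t pfinfo;
  uint8_t buf[8];

  if ( !hvm_translate_linear_addr(x86_seg_ds, offset, sizeof(buf),
                                  hvm_access_read, sh_ctxt, &linear) &&
       hvm_copy_from_guest_linear(buf, linear, sizeof(buf), 0,
                                  &pfinfo) == HVMCOPY_okay )
  {
      /* buf now holds guest data read through the linear address. */
  }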
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
CC: Paul Durrant <paul.durrant@citrix.com>
---
xen/arch/x86/hvm/emulate.c | 12 ++++----
xen/arch/x86/hvm/hvm.c | 60 +++++++++++++++++++--------------------
xen/arch/x86/hvm/vmx/vvmx.c | 6 ++--
xen/arch/x86/mm/shadow/common.c | 8 +++---
xen/include/asm-x86/hvm/support.h | 14 ++++-----
5 files changed, 50 insertions(+), 50 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 5165bb2..efd6d32 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -791,8 +791,8 @@ static int __hvmemul_read(
pfec |= PFEC_user_mode;
rc = ((access_type == hvm_access_insn_fetch) ?
- hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo) :
- hvm_copy_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo));
+ hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
+ hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
switch ( rc )
{
@@ -898,7 +898,7 @@ static int hvmemul_write(
(hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
pfec |= PFEC_user_mode;
- rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec, &pfinfo);
+ rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
switch ( rc )
{
@@ -1947,9 +1947,9 @@ void hvm_emulate_init_per_insn(
hvm_access_insn_fetch,
hvmemul_ctxt->ctxt.addr_size,
&addr) &&
- hvm_fetch_from_guest_virt(hvmemul_ctxt->insn_buf, addr,
- sizeof(hvmemul_ctxt->insn_buf),
- pfec, NULL) == HVMCOPY_okay) ?
+ hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
+ sizeof(hvmemul_ctxt->insn_buf),
+ pfec, NULL) == HVMCOPY_okay) ?
sizeof(hvmemul_ctxt->insn_buf) : 0;
}
else
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5eae06a..37eaee2 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2925,7 +2925,7 @@ void hvm_task_switch(
goto out;
}
- rc = hvm_copy_from_guest_virt(
+ rc = hvm_copy_from_guest_linear(
&tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
if ( rc != HVMCOPY_okay )
goto out;
@@ -2960,15 +2960,15 @@ void hvm_task_switch(
hvm_get_segment_register(v, x86_seg_ldtr, &segr);
tss.ldt = segr.sel;
- rc = hvm_copy_to_guest_virt(prev_tr.base + offsetof(typeof(tss), eip),
- &tss.eip,
- offsetof(typeof(tss), trace) -
- offsetof(typeof(tss), eip),
- PFEC_page_present, &pfinfo);
+ rc = hvm_copy_to_guest_linear(prev_tr.base + offsetof(typeof(tss), eip),
+ &tss.eip,
+ offsetof(typeof(tss), trace) -
+ offsetof(typeof(tss), eip),
+ PFEC_page_present, &pfinfo);
if ( rc != HVMCOPY_okay )
goto out;
- rc = hvm_copy_from_guest_virt(
+ rc = hvm_copy_from_guest_linear(
&tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
/*
* Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
@@ -3008,9 +3008,9 @@ void hvm_task_switch(
regs->eflags |= X86_EFLAGS_NT;
tss.back_link = prev_tr.sel;
- rc = hvm_copy_to_guest_virt(tr.base + offsetof(typeof(tss), back_link),
- &tss.back_link, sizeof(tss.back_link), 0,
- &pfinfo);
+ rc = hvm_copy_to_guest_linear(tr.base + offsetof(typeof(tss), back_link),
+ &tss.back_link, sizeof(tss.back_link), 0,
+ &pfinfo);
if ( rc == HVMCOPY_bad_gva_to_gfn )
exn_raised = 1;
else if ( rc != HVMCOPY_okay )
@@ -3047,8 +3047,8 @@ void hvm_task_switch(
16 << segr.attr.fields.db,
&linear_addr) )
{
- rc = hvm_copy_to_guest_virt(linear_addr, &errcode, opsz, 0,
- &pfinfo);
+ rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
+ &pfinfo);
if ( rc == HVMCOPY_bad_gva_to_gfn )
exn_raised = 1;
else if ( rc != HVMCOPY_okay )
@@ -3067,7 +3067,7 @@ void hvm_task_switch(
#define HVMCOPY_from_guest (0u<<0)
#define HVMCOPY_to_guest (1u<<0)
#define HVMCOPY_phys (0u<<2)
-#define HVMCOPY_virt (1u<<2)
+#define HVMCOPY_linear (1u<<2)
static enum hvm_copy_result __hvm_copy(
void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec,
pagefault_info_t *pfinfo)
@@ -3101,7 +3101,7 @@ static enum hvm_copy_result __hvm_copy(
count = min_t(int, PAGE_SIZE - gpa, todo);
- if ( flags & HVMCOPY_virt )
+ if ( flags & HVMCOPY_linear )
{
gfn = paging_gva_to_gfn(curr, addr, &pfec);
if ( gfn == gfn_x(INVALID_GFN) )
@@ -3295,30 +3295,30 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL);
}
-enum hvm_copy_result hvm_copy_to_guest_virt(
- unsigned long vaddr, void *buf, int size, uint32_t pfec,
+enum hvm_copy_result hvm_copy_to_guest_linear(
+ unsigned long addr, void *buf, int size, uint32_t pfec,
pagefault_info_t *pfinfo)
{
- return __hvm_copy(buf, vaddr, size,
- HVMCOPY_to_guest | HVMCOPY_virt,
+ return __hvm_copy(buf, addr, size,
+ HVMCOPY_to_guest | HVMCOPY_linear,
PFEC_page_present | PFEC_write_access | pfec, pfinfo);
}
-enum hvm_copy_result hvm_copy_from_guest_virt(
- void *buf, unsigned long vaddr, int size, uint32_t pfec,
+enum hvm_copy_result hvm_copy_from_guest_linear(
+ void *buf, unsigned long addr, int size, uint32_t pfec,
pagefault_info_t *pfinfo)
{
- return __hvm_copy(buf, vaddr, size,
- HVMCOPY_from_guest | HVMCOPY_virt,
+ return __hvm_copy(buf, addr, size,
+ HVMCOPY_from_guest | HVMCOPY_linear,
PFEC_page_present | pfec, pfinfo);
}
-enum hvm_copy_result hvm_fetch_from_guest_virt(
- void *buf, unsigned long vaddr, int size, uint32_t pfec,
+enum hvm_copy_result hvm_fetch_from_guest_linear(
+ void *buf, unsigned long addr, int size, uint32_t pfec,
pagefault_info_t *pfinfo)
{
- return __hvm_copy(buf, vaddr, size,
- HVMCOPY_from_guest | HVMCOPY_virt,
+ return __hvm_copy(buf, addr, size,
+ HVMCOPY_from_guest | HVMCOPY_linear,
PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
}
@@ -3333,7 +3333,7 @@ unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
return 0;
}
- rc = hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len, 0, NULL);
+ rc = hvm_copy_to_guest_linear((unsigned long)to, (void *)from, len, 0, NULL);
return rc ? len : 0; /* fake a copy_to_user() return code */
}
@@ -3363,7 +3363,7 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
return 0;
}
- rc = hvm_copy_from_guest_virt(to, (unsigned long)from, len, 0, NULL);
+ rc = hvm_copy_from_guest_linear(to, (unsigned long)from, len, 0, NULL);
return rc ? len : 0; /* fake a copy_from_user() return code */
}
@@ -4038,8 +4038,8 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
(hvm_long_mode_enabled(cur) &&
cs->attr.fields.l) ? 64 :
cs->attr.fields.db ? 32 : 16, &addr) &&
- (hvm_fetch_from_guest_virt(sig, addr, sizeof(sig),
- walk, NULL) == HVMCOPY_okay) &&
+ (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
+ walk, NULL) == HVMCOPY_okay) &&
(memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
{
regs->eip += sizeof(sig);
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 7342d12..fd7ea0a 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -452,7 +452,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
goto gp_fault;
if ( poperandS != NULL &&
- hvm_copy_from_guest_virt(poperandS, base, size, 0, &pfinfo)
+ hvm_copy_from_guest_linear(poperandS, base, size, 0, &pfinfo)
!= HVMCOPY_okay )
return X86EMUL_EXCEPTION;
decode->mem = base;
@@ -1622,7 +1622,7 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
gpa = nvcpu->nv_vvmcxaddr;
- rc = hvm_copy_to_guest_virt(decode.mem, &gpa, decode.len, 0, &pfinfo);
+ rc = hvm_copy_to_guest_linear(decode.mem, &gpa, decode.len, 0, &pfinfo);
if ( rc != HVMCOPY_okay )
return X86EMUL_EXCEPTION;
@@ -1693,7 +1693,7 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
switch ( decode.type ) {
case VMX_INST_MEMREG_TYPE_MEMORY:
- rc = hvm_copy_to_guest_virt(decode.mem, &value, decode.len, 0, &pfinfo);
+ rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0, &pfinfo);
if ( rc != HVMCOPY_okay )
return X86EMUL_EXCEPTION;
break;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index b659324..0760e76 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -189,9 +189,9 @@ hvm_read(enum x86_segment seg,
return rc;
if ( access_type == hvm_access_insn_fetch )
- rc = hvm_fetch_from_guest_virt(p_data, addr, bytes, 0, &pfinfo);
+ rc = hvm_fetch_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
else
- rc = hvm_copy_from_guest_virt(p_data, addr, bytes, 0, &pfinfo);
+ rc = hvm_copy_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
switch ( rc )
{
@@ -419,7 +419,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
(!hvm_translate_linear_addr(
x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
hvm_access_insn_fetch, sh_ctxt, &addr) &&
- !hvm_fetch_from_guest_virt(
+ !hvm_fetch_from_guest_linear(
sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
? sizeof(sh_ctxt->insn_buf) : 0;
@@ -447,7 +447,7 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
(!hvm_translate_linear_addr(
x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
hvm_access_insn_fetch, sh_ctxt, &addr) &&
- !hvm_fetch_from_guest_virt(
+ !hvm_fetch_from_guest_linear(
sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
? sizeof(sh_ctxt->insn_buf) : 0;
sh_ctxt->insn_buf_eip = regs->eip;
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 114aa04..78349f8 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -73,7 +73,7 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
void *buf, paddr_t paddr, int size);
/*
- * Copy to/from a guest virtual address. @pfec should include PFEC_user_mode
+ * Copy to/from a guest linear address. @pfec should include PFEC_user_mode
* if emulating a user-mode access (CPL=3). All other flags in @pfec are
* managed by the called function: it is therefore optional for the caller
* to set them.
@@ -95,14 +95,14 @@ typedef struct pagefault_info
int ec;
} pagefault_info_t;
-enum hvm_copy_result hvm_copy_to_guest_virt(
- unsigned long vaddr, void *buf, int size, uint32_t pfec,
+enum hvm_copy_result hvm_copy_to_guest_linear(
+ unsigned long addr, void *buf, int size, uint32_t pfec,
pagefault_info_t *pfinfo);
-enum hvm_copy_result hvm_copy_from_guest_virt(
- void *buf, unsigned long vaddr, int size, uint32_t pfec,
+enum hvm_copy_result hvm_copy_from_guest_linear(
+ void *buf, unsigned long addr, int size, uint32_t pfec,
pagefault_info_t *pfinfo);
-enum hvm_copy_result hvm_fetch_from_guest_virt(
- void *buf, unsigned long vaddr, int size, uint32_t pfec,
+enum hvm_copy_result hvm_fetch_from_guest_linear(
+ void *buf, unsigned long addr, int size, uint32_t pfec,
pagefault_info_t *pfinfo);
#define HVM_HCALL_completed 0 /* hypercall completed - no further action */
--
2.1.4