From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Paul Durrant <paul.durrant@citrix.com>,
Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v6 02/16] x86/hvm: remove multiple open coded 'chunking' loops
Date: Fri, 3 Jul 2015 17:25:19 +0100
Message-ID: <1435940733-20856-3-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1435940733-20856-1-git-send-email-paul.durrant@citrix.com>
...in hvmemul_read/write()
Add hvmemul_phys_mmio_access() and hvmemul_linear_mmio_access() functions
to reduce code duplication.
NOTE: This patch also introduces a change in 'chunking' around a page
boundary. Previously (for example) an 8-byte access at the last
byte of a page would get carried out as 8 single-byte accesses.
It will now be carried out as a single-byte access, followed by
a 4-byte access, a 2-byte access and then another single-byte
access.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
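For illustration only (a standalone sketch, not part of the applied diff): assuming 4096-byte pages and an LP64 host, the fragment below shows how the per-page split done by hvmemul_linear_mmio_access() combines with the power-of-2 split done by hvmemul_phys_mmio_access() for the 8-byte example in the NOTE above. fls_sketch() is a local stand-in for Xen's fls().

#include <stdio.h>

#define PAGE_SIZE 4096ul
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Position of the highest set bit, 1-based (stand-in for Xen's fls()). */
static unsigned int fls_sketch(unsigned int x)
{
    unsigned int r = 0;

    while ( x )
    {
        x >>= 1;
        r++;
    }
    return r;
}

int main(void)
{
    unsigned long gla = 0x1fff;  /* 8-byte access starting at the last byte of a page */
    unsigned int bytes = 8;

    while ( bytes != 0 )
    {
        /* Per-page portion, as chosen by hvmemul_linear_mmio_access(). */
        unsigned int size = PAGE_SIZE - (gla & ~PAGE_MASK);
        unsigned int chunk;

        if ( size > bytes )
            size = bytes;

        /* Highest power of 2 not exceeding size or sizeof(long). */
        chunk = 1u << (fls_sketch(size) - 1);
        if ( chunk > sizeof(long) )
            chunk = sizeof(long);

        for ( ;; )
        {
            printf("%u-byte access at 0x%lx\n", chunk, gla);
            gla += chunk;
            size -= chunk;
            bytes -= chunk;

            if ( size == 0 )
                break;

            while ( chunk > size )
                chunk >>= 1;
        }
    }

    /* Prints a 1-byte, a 4-byte, a 2-byte and a 1-byte access, as per the NOTE. */
    return 0;
}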
xen/arch/x86/hvm/emulate.c | 223 +++++++++++++++++++++++---------------------
1 file changed, 116 insertions(+), 107 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 8b60843..b823d84 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -539,6 +539,117 @@ static int hvmemul_virtual_to_linear(
return X86EMUL_EXCEPTION;
}
+static int hvmemul_phys_mmio_access(
+ paddr_t gpa, unsigned int size, uint8_t dir, uint8_t *buffer)
+{
+ unsigned long one_rep = 1;
+ unsigned int chunk;
+ int rc;
+
+ /* Accesses must fall within a page. */
+ BUG_ON((gpa & ~PAGE_MASK) + size > PAGE_SIZE);
+
+ /*
+ * hvmemul_do_io() cannot handle non-power-of-2 accesses or
+ * accesses larger than sizeof(long), so choose the highest power
+ * of 2 not exceeding sizeof(long) as the 'chunk' size.
+ */
+ ASSERT(size != 0);
+ chunk = 1u << (fls(size) - 1);
+ if ( chunk > sizeof (long) )
+ chunk = sizeof (long);
+
+ for ( ;; )
+ {
+ rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
+ buffer);
+ if ( rc != X86EMUL_OKAY )
+ break;
+
+ /* Advance to the next chunk. */
+ gpa += chunk;
+ buffer += chunk;
+ size -= chunk;
+
+ if ( size == 0 )
+ break;
+
+ /*
+ * If the chunk now exceeds the remaining size, choose the next
+ * lowest power of 2 that will fit.
+ */
+ while ( chunk > size )
+ chunk >>= 1;
+ }
+
+ return rc;
+}
+
+static int hvmemul_linear_mmio_access(
+ unsigned long gla, unsigned int size, uint8_t dir, uint8_t *buffer,
+ uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt, bool_t known_gpfn)
+{
+ struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ unsigned long offset = gla & ~PAGE_MASK;
+ unsigned int chunk;
+ paddr_t gpa;
+ unsigned long one_rep = 1;
+ int rc;
+
+ chunk = min_t(unsigned int, size, PAGE_SIZE - offset);
+
+ if ( known_gpfn )
+ gpa = pfn_to_paddr(vio->mmio_gpfn) | offset;
+ else
+ {
+ rc = hvmemul_linear_to_phys(gla, &gpa, chunk, &one_rep, pfec,
+ hvmemul_ctxt);
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+ }
+
+ for ( ;; )
+ {
+ rc = hvmemul_phys_mmio_access(gpa, chunk, dir, buffer);
+ if ( rc != X86EMUL_OKAY )
+ break;
+
+ gla += chunk;
+ buffer += chunk;
+ size -= chunk;
+
+ if ( size == 0 )
+ break;
+
+ ASSERT((gla & ~PAGE_MASK) == 0);
+ chunk = min_t(unsigned int, size, PAGE_SIZE);
+ rc = hvmemul_linear_to_phys(gla, &gpa, chunk, &one_rep, pfec,
+ hvmemul_ctxt);
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+ }
+
+ return rc;
+}
+
+static inline int hvmemul_linear_mmio_read(
+ unsigned long gla, unsigned int size, void *buffer,
+ uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt,
+ bool_t translate)
+{
+ return hvmemul_linear_mmio_access(gla, size, IOREQ_READ, buffer,
+ pfec, hvmemul_ctxt, translate);
+}
+
+static inline int hvmemul_linear_mmio_write(
+ unsigned long gla, unsigned int size, void *buffer,
+ uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt,
+ bool_t translate)
+{
+ return hvmemul_linear_mmio_access(gla, size, IOREQ_WRITE, buffer,
+ pfec, hvmemul_ctxt, translate);
+}
+
static int __hvmemul_read(
enum x86_segment seg,
unsigned long offset,
@@ -549,51 +660,19 @@ static int __hvmemul_read(
{
struct vcpu *curr = current;
unsigned long addr, reps = 1;
- unsigned int off, chunk = min(bytes, 1U << LONG_BYTEORDER);
uint32_t pfec = PFEC_page_present;
struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
- paddr_t gpa;
int rc;
rc = hvmemul_virtual_to_linear(
seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY )
return rc;
- off = addr & (PAGE_SIZE - 1);
- /*
- * We only need to handle sizes actual instruction operands can have. All
- * such sizes are either powers of 2 or the sum of two powers of 2. Thus
- * picking as initial chunk size the largest power of 2 not greater than
- * the total size will always result in only power-of-2 size requests
- * issued to hvmemul_do_mmio() (hvmemul_do_io() rejects non-powers-of-2).
- */
- while ( chunk & (chunk - 1) )
- chunk &= chunk - 1;
- if ( off + bytes > PAGE_SIZE )
- while ( off & (chunk - 1) )
- chunk >>= 1;
-
if ( ((access_type != hvm_access_insn_fetch
? vio->mmio_access.read_access
: vio->mmio_access.insn_fetch)) &&
(vio->mmio_gva == (addr & PAGE_MASK)) )
- {
- gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
- while ( (off + chunk) <= PAGE_SIZE )
- {
- rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_READ, 0,
- p_data);
- if ( rc != X86EMUL_OKAY || bytes == chunk )
- return rc;
- addr += chunk;
- off += chunk;
- gpa += chunk;
- p_data += chunk;
- bytes -= chunk;
- if ( bytes < chunk )
- chunk = bytes;
- }
- }
+ return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
if ( (seg != x86_seg_none) &&
(hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
@@ -612,30 +691,8 @@ static int __hvmemul_read(
case HVMCOPY_bad_gfn_to_mfn:
if ( access_type == hvm_access_insn_fetch )
return X86EMUL_UNHANDLEABLE;
- rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
- hvmemul_ctxt);
- while ( rc == X86EMUL_OKAY )
- {
- rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_READ, 0,
- p_data);
- if ( rc != X86EMUL_OKAY || bytes == chunk )
- break;
- addr += chunk;
- off += chunk;
- p_data += chunk;
- bytes -= chunk;
- if ( bytes < chunk )
- chunk = bytes;
- if ( off < PAGE_SIZE )
- gpa += chunk;
- else
- {
- rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
- hvmemul_ctxt);
- off = 0;
- }
- }
- return rc;
+
+ return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
case HVMCOPY_gfn_paged_out:
case HVMCOPY_gfn_shared:
return X86EMUL_RETRY;
@@ -701,43 +758,18 @@ static int hvmemul_write(
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
struct vcpu *curr = current;
unsigned long addr, reps = 1;
- unsigned int off, chunk = min(bytes, 1U << LONG_BYTEORDER);
uint32_t pfec = PFEC_page_present | PFEC_write_access;
struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
- paddr_t gpa;
int rc;
rc = hvmemul_virtual_to_linear(
seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY )
return rc;
- off = addr & (PAGE_SIZE - 1);
- /* See the respective comment in __hvmemul_read(). */
- while ( chunk & (chunk - 1) )
- chunk &= chunk - 1;
- if ( off + bytes > PAGE_SIZE )
- while ( off & (chunk - 1) )
- chunk >>= 1;
if ( vio->mmio_access.write_access &&
(vio->mmio_gva == (addr & PAGE_MASK)) )
- {
- gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
- while ( (off + chunk) <= PAGE_SIZE )
- {
- rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_WRITE, 0,
- p_data);
- if ( rc != X86EMUL_OKAY || bytes == chunk )
- return rc;
- addr += chunk;
- off += chunk;
- gpa += chunk;
- p_data += chunk;
- bytes -= chunk;
- if ( bytes < chunk )
- chunk = bytes;
- }
- }
+ return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
if ( (seg != x86_seg_none) &&
(hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
@@ -752,30 +784,7 @@ static int hvmemul_write(
case HVMCOPY_bad_gva_to_gfn:
return X86EMUL_EXCEPTION;
case HVMCOPY_bad_gfn_to_mfn:
- rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
- hvmemul_ctxt);
- while ( rc == X86EMUL_OKAY )
- {
- rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_WRITE, 0,
- p_data);
- if ( rc != X86EMUL_OKAY || bytes == chunk )
- break;
- addr += chunk;
- off += chunk;
- p_data += chunk;
- bytes -= chunk;
- if ( bytes < chunk )
- chunk = bytes;
- if ( off < PAGE_SIZE )
- gpa += chunk;
- else
- {
- rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
- hvmemul_ctxt);
- off = 0;
- }
- }
- return rc;
+ return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
case HVMCOPY_gfn_paged_out:
case HVMCOPY_gfn_shared:
return X86EMUL_RETRY;
--
1.7.10.4