From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xen.org
Cc: Paul Durrant <paul.durrant@citrix.com>,
Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v7 15/15] x86/hvm: track large memory mapped accesses by buffer offset
Date: Thu, 9 Jul 2015 14:10:55 +0100
Message-ID: <1436447455-11524-16-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1436447455-11524-1-git-send-email-paul.durrant@citrix.com>
The code in hvmemul_do_io() that tracks large reads or writes, to avoid
re-issuing component I/O, is defeated by accesses that cross a page
boundary because it tracks by physical address. The code is also only
relevant to memory-mapped I/O to or from a buffer.

This patch refactors the code, moving it into hvmemul_phys_mmio_access()
where it is relevant, and tracks completed chunks by buffer offset rather
than by physical address. Separate I/O emulations (of which there may be
up to three per instruction) are distinguished by linear address.
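To make the change concrete, below is a minimal standalone sketch of the
idea (illustrative only; the names mmio_cache, issue_chunk and read_chunk
are not Xen's, and the pretend device model just returns a counter
pattern). Completed chunks are recorded by their offset into the
instruction's data buffer, so a retried emulation replays them rather
than re-issuing the I/O; because the key is the buffer offset, an access
that crosses a page boundary is still tracked correctly.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mmio_cache {
    unsigned int size;      /* bytes already completed on earlier passes */
    uint8_t buffer[32];     /* their values, indexed by buffer offset */
};

/* Pretend device-model transaction: a read returning a counter pattern. */
static void issue_chunk(unsigned long gpa, uint8_t *buf, unsigned int chunk)
{
    for ( unsigned int i = 0; i < chunk; i++ )
        buf[i] = (uint8_t)(gpa + i);
}

/*
 * Satisfy one chunk of a read either from the cache (if an earlier
 * emulation attempt already completed it) or from the "device model",
 * recording the result by buffer offset rather than by address.
 */
static void read_chunk(struct mmio_cache *cache, unsigned long gpa,
                       uint8_t *buffer, unsigned int offset,
                       unsigned int chunk)
{
    if ( offset < cache->size )
    {
        /* Already done on an earlier pass: replay the cached bytes. */
        assert(offset + chunk <= cache->size);
        memcpy(&buffer[offset], &cache->buffer[offset], chunk);
    }
    else
    {
        /* New chunk: do the I/O and remember the result. */
        assert(offset == cache->size);
        issue_chunk(gpa, &buffer[offset], chunk);
        memcpy(&cache->buffer[offset], &buffer[offset], chunk);
        cache->size += chunk;
    }
}

int main(void)
{
    struct mmio_cache cache = { 0 };
    uint8_t buffer[8];

    /* First emulation pass completes only the first 4-byte chunk... */
    read_chunk(&cache, 0xfeedf000UL, buffer, 0, 4);

    /* ...the retry replays that chunk and then issues the second one. */
    read_chunk(&cache, 0xfeedf000UL, buffer, 0, 4);
    read_chunk(&cache, 0xfeedf004UL, buffer, 4, 4);

    for ( unsigned int i = 0; i < sizeof(buffer); i++ )
        printf("%02x ", buffer[i]);
    printf("\n");
    return 0;
}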
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
v7:
- Added comment requested by Jan
- Changed BUG_ON() to domain_crash()
v6:
- Added Andrew's reviewed-by
v5:
- Fixed to cache up to three distinct I/O emulations per instruction
---
xen/arch/x86/hvm/emulate.c | 130 +++++++++++++++++++++-------------------
xen/include/asm-x86/hvm/vcpu.h | 25 +++++---
2 files changed, 86 insertions(+), 69 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index d7ee096..f4d61be 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -106,29 +106,6 @@ static int hvmemul_do_io(
return X86EMUL_UNHANDLEABLE;
}
- if ( is_mmio && !data_is_addr )
- {
- /* Part of a multi-cycle read or write? */
- if ( dir == IOREQ_WRITE )
- {
- paddr_t pa = vio->mmio_large_write_pa;
- unsigned int bytes = vio->mmio_large_write_bytes;
- if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
- return X86EMUL_OKAY;
- }
- else
- {
- paddr_t pa = vio->mmio_large_read_pa;
- unsigned int bytes = vio->mmio_large_read_bytes;
- if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
- {
- memcpy(p_data, &vio->mmio_large_read[addr - pa],
- size);
- return X86EMUL_OKAY;
- }
- }
- }
-
switch ( vio->io_req.state )
{
case STATE_IOREQ_NONE:
@@ -208,33 +185,6 @@ static int hvmemul_do_io(
memcpy(p_data, &p.data, size);
}
- if ( is_mmio && !data_is_addr )
- {
- /* Part of a multi-cycle read or write? */
- if ( dir == IOREQ_WRITE )
- {
- paddr_t pa = vio->mmio_large_write_pa;
- unsigned int bytes = vio->mmio_large_write_bytes;
- if ( bytes == 0 )
- pa = vio->mmio_large_write_pa = addr;
- if ( addr == (pa + bytes) )
- vio->mmio_large_write_bytes += size;
- }
- else
- {
- paddr_t pa = vio->mmio_large_read_pa;
- unsigned int bytes = vio->mmio_large_read_bytes;
- if ( bytes == 0 )
- pa = vio->mmio_large_read_pa = addr;
- if ( (addr == (pa + bytes)) &&
- ((bytes + size) <= sizeof(vio->mmio_large_read)) )
- {
- memcpy(&vio->mmio_large_read[bytes], p_data, size);
- vio->mmio_large_read_bytes += size;
- }
- }
- }
-
return X86EMUL_OKAY;
}
@@ -590,11 +540,12 @@ static int hvmemul_virtual_to_linear(
}
static int hvmemul_phys_mmio_access(
- paddr_t gpa, unsigned int size, uint8_t dir, uint8_t *buffer)
+ struct hvm_mmio_cache *cache, paddr_t gpa, unsigned int size, uint8_t dir,
+ uint8_t *buffer, unsigned int offset)
{
unsigned long one_rep = 1;
unsigned int chunk;
- int rc;
+ int rc = X86EMUL_OKAY;
/* Accesses must fall within a page. */
BUG_ON((gpa & ~PAGE_MASK) + size > PAGE_SIZE);
@@ -611,14 +562,33 @@ static int hvmemul_phys_mmio_access(
for ( ;; )
{
- rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
- buffer);
- if ( rc != X86EMUL_OKAY )
- break;
+ /* Have we already done this chunk? */
+ if ( offset < cache->size )
+ {
+ ASSERT((offset + chunk) <= cache->size);
+
+ if ( dir == IOREQ_READ )
+ memcpy(&buffer[offset], &cache->buffer[offset], chunk);
+ else if ( memcmp(&buffer[offset], &cache->buffer[offset], chunk) != 0 )
+ domain_crash(current->domain);
+ }
+ else
+ {
+ ASSERT(offset == cache->size);
+
+ rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
+ &buffer[offset]);
+ if ( rc != X86EMUL_OKAY )
+ break;
+
+ /* Note that we have now done this chunk. */
+ memcpy(&cache->buffer[offset], &buffer[offset], chunk);
+ cache->size += chunk;
+ }
/* Advance to the next chunk. */
gpa += chunk;
- buffer += chunk;
+ offset += chunk;
size -= chunk;
if ( size == 0 )
@@ -635,13 +605,49 @@ static int hvmemul_phys_mmio_access(
return rc;
}
+/*
+ * Multi-cycle MMIO handling is based upon the assumption that emulation
+ * of the same instruction will not access the same MMIO region more
+ * than once. Hence we can deal with re-emulation (for secondary or
+ * subsequent cycles) by looking up the result or previous I/O in a
+ * cache indexed by linear MMIO address.
+ */
+static struct hvm_mmio_cache *hvmemul_find_mmio_cache(
+ struct hvm_vcpu_io *vio, unsigned long gla, uint8_t dir)
+{
+ unsigned int i;
+ struct hvm_mmio_cache *cache;
+
+ for ( i = 0; i < vio->mmio_cache_count; i ++ )
+ {
+ cache = &vio->mmio_cache[i];
+
+ if ( gla == cache->gla &&
+ dir == cache->dir )
+ return cache;
+ }
+
+ i = vio->mmio_cache_count++;
+ if( i == ARRAY_SIZE(vio->mmio_cache) )
+ domain_crash(current->domain);
+
+ cache = &vio->mmio_cache[i];
+ memset(cache, 0, sizeof (*cache));
+
+ cache->gla = gla;
+ cache->dir = dir;
+
+ return cache;
+}
+
static int hvmemul_linear_mmio_access(
- unsigned long gla, unsigned int size, uint8_t dir, uint8_t *buffer,
+ unsigned long gla, unsigned int size, uint8_t dir, void *buffer,
uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt, bool_t known_gpfn)
{
struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
unsigned long offset = gla & ~PAGE_MASK;
- unsigned int chunk;
+ struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(vio, gla, dir);
+ unsigned int chunk, buffer_offset = 0;
paddr_t gpa;
unsigned long one_rep = 1;
int rc;
@@ -660,12 +666,12 @@ static int hvmemul_linear_mmio_access(
for ( ;; )
{
- rc = hvmemul_phys_mmio_access(gpa, chunk, dir, buffer);
+ rc = hvmemul_phys_mmio_access(cache, gpa, chunk, dir, buffer, buffer_offset);
if ( rc != X86EMUL_OKAY )
break;
gla += chunk;
- buffer += chunk;
+ buffer_offset += chunk;
size -= chunk;
if ( size == 0 )
@@ -1612,7 +1618,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
rc = X86EMUL_RETRY;
if ( rc != X86EMUL_RETRY )
{
- vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
+ vio->mmio_cache_count = 0;
vio->mmio_insn_bytes = 0;
}
else
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 13ff54f..6ee693f 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -42,6 +42,17 @@ struct hvm_vcpu_asid {
uint32_t asid;
};
+/*
+ * We may read or write up to m256 as a number of device-model
+ * transactions.
+ */
+struct hvm_mmio_cache {
+ unsigned long gla;
+ unsigned int size;
+ uint8_t dir;
+ uint8_t buffer[32];
+};
+
struct hvm_vcpu_io {
/* I/O request in flight to device model. */
enum hvm_io_completion io_completion;
@@ -57,13 +68,13 @@ struct hvm_vcpu_io {
unsigned long mmio_gva;
unsigned long mmio_gpfn;
- /* We may read up to m256 as a number of device-model transactions. */
- paddr_t mmio_large_read_pa;
- uint8_t mmio_large_read[32];
- unsigned int mmio_large_read_bytes;
- /* We may write up to m256 as a number of device-model transactions. */
- unsigned int mmio_large_write_bytes;
- paddr_t mmio_large_write_pa;
+ /*
+ * We may need to handle up to 3 distinct memory accesses per
+ * instruction.
+ */
+ struct hvm_mmio_cache mmio_cache[3];
+ unsigned int mmio_cache_count;
+
/* For retries we shouldn't re-fetch the instruction. */
unsigned int mmio_insn_bytes;
unsigned char mmio_insn[16];
--
1.7.10.4
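For reference, here is a minimal standalone sketch of the per-instruction
cache lookup that hvmemul_find_mmio_cache() performs in the patch above.
The names (vcpu_io, find_cache, MMIO_CACHES) are illustrative rather than
Xen's, and where the patch crashes the domain on overflow this sketch
simply returns NULL.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MMIO_CACHES 3    /* up to three distinct accesses per instruction */

struct mmio_cache {
    unsigned long gla;   /* linear address of the access */
    unsigned int size;
    uint8_t dir;         /* 0 = read, 1 = write */
    uint8_t buffer[32];
};

struct vcpu_io {
    struct mmio_cache cache[MMIO_CACHES];
    unsigned int count;
};

/*
 * Find the cache for an access identified by linear address and
 * direction, allocating a fresh (zeroed) one on first sight.
 */
static struct mmio_cache *find_cache(struct vcpu_io *io,
                                     unsigned long gla, uint8_t dir)
{
    for ( unsigned int i = 0; i < io->count; i++ )
        if ( io->cache[i].gla == gla && io->cache[i].dir == dir )
            return &io->cache[i];

    if ( io->count == MMIO_CACHES )
        return NULL;     /* more accesses than expected */

    struct mmio_cache *c = &io->cache[io->count++];
    memset(c, 0, sizeof(*c));
    c->gla = gla;
    c->dir = dir;
    return c;
}

int main(void)
{
    struct vcpu_io io = { .count = 0 };

    struct mmio_cache *first = find_cache(&io, 0x1000, 0);
    struct mmio_cache *again = find_cache(&io, 0x1000, 0); /* re-emulation */

    printf("caches in use: %u, same cache returned: %s\n",
           io.count, (first == again) ? "yes" : "no");
    return 0;
}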