From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Paul Durrant <paul.durrant@citrix.com>,
Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v5 16/16] x86/hvm: track large memory mapped accesses by buffer offset
Date: Tue, 30 Jun 2015 14:05:58 +0100
Message-ID: <1435669558-5421-17-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1435669558-5421-1-git-send-email-paul.durrant@citrix.com>

The code in hvmemul_do_io() that tracks large reads or writes, to avoid
re-issuing component I/O, is defeated by accesses that cross a page
boundary, because it tracks by physical address and the physical pages
backing such an access need not be contiguous. The code is also only
relevant to memory-mapped I/O to or from a buffer.

This patch re-factors the code and moves it into
hvmemul_phys_mmio_access(), where it is relevant, and tracks by buffer
offset rather than physical address. Separate I/O emulations (of which
there may be up to three per instruction) are distinguished by linear
address.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
xen/arch/x86/hvm/emulate.c | 122 ++++++++++++++++++++--------------------
xen/include/asm-x86/hvm/vcpu.h | 25 +++++---
2 files changed, 78 insertions(+), 69 deletions(-)
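
[Reading aid, not part of the patch: the replay idea is easier to see in
isolation. Below is a minimal, self-contained sketch of offset-based
tracking under the same assumptions as this patch -- all names are
invented, the cache is pre-selected, and the device-model I/O is
simulated with memset(). In the real code the cache is first looked up
by linear address and direction via hvmemul_find_mmio_cache() in the
diff below, since one instruction may generate up to three distinct
accesses.]

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-ins; only the 32-byte (m256) buffer mirrors the patch. */
    struct mmio_cache {
        unsigned long gla;     /* linear address of the access */
        uint8_t dir;           /* 0 = read, 1 = write */
        unsigned long size;    /* bytes completed so far */
        uint8_t buffer[32];
    };

    /*
     * One chunk of a (possibly multi-chunk) emulated MMIO read. If 'off'
     * lies within the cached region, the chunk completed on an earlier
     * emulation attempt: replay it from the cache rather than re-issuing
     * it to the device model.
     */
    static void mmio_read_chunk(struct mmio_cache *cache, unsigned int off,
                                unsigned int chunk, uint8_t *buffer)
    {
        if ( off < cache->size )
        {
            assert(off + chunk <= cache->size);
            memcpy(&buffer[off], &cache->buffer[off], chunk);
            printf("off %u: replayed %u bytes from cache\n", off, chunk);
        }
        else
        {
            assert(off == cache->size);
            memset(&buffer[off], 0xab, chunk); /* simulated device read */
            memcpy(&cache->buffer[off], &buffer[off], chunk);
            cache->size += chunk;
            printf("off %u: issued %u bytes, cache now holds %lu\n",
                   off, chunk, cache->size);
        }
    }

    int main(void)
    {
        /* A 16-byte read starting 8 bytes short of a page end: the two
         * 8-byte halves hit two unrelated physical pages, so tracking by
         * physical address could never match the second half. Tracking by
         * buffer offset is immune to the discontinuity. */
        struct mmio_cache cache = { .gla = 0x10ff8, .dir = 0 };
        uint8_t buf[16];

        mmio_read_chunk(&cache, 0, 8, buf);  /* first attempt, chunk 1 */
        mmio_read_chunk(&cache, 8, 8, buf);  /* first attempt, chunk 2 */

        /* Retried emulation of the same instruction: both chunks replay. */
        mmio_read_chunk(&cache, 0, 8, buf);
        mmio_read_chunk(&cache, 8, 8, buf);
        return 0;
    }
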
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 8bb56a2..d83d21c 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -106,29 +106,6 @@ static int hvmemul_do_io(
return X86EMUL_UNHANDLEABLE;
}
- if ( is_mmio && !data_is_addr )
- {
- /* Part of a multi-cycle read or write? */
- if ( dir == IOREQ_WRITE )
- {
- paddr_t pa = vio->mmio_large_write_pa;
- unsigned int bytes = vio->mmio_large_write_bytes;
- if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
- return X86EMUL_OKAY;
- }
- else
- {
- paddr_t pa = vio->mmio_large_read_pa;
- unsigned int bytes = vio->mmio_large_read_bytes;
- if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
- {
- memcpy(p_data, &vio->mmio_large_read[addr - pa],
- size);
- return X86EMUL_OKAY;
- }
- }
- }
-
switch ( vio->io_req.state )
{
case STATE_IOREQ_NONE:
@@ -208,33 +185,6 @@ static int hvmemul_do_io(
memcpy(p_data, &p.data, size);
}
- if ( is_mmio && !data_is_addr )
- {
- /* Part of a multi-cycle read or write? */
- if ( dir == IOREQ_WRITE )
- {
- paddr_t pa = vio->mmio_large_write_pa;
- unsigned int bytes = vio->mmio_large_write_bytes;
- if ( bytes == 0 )
- pa = vio->mmio_large_write_pa = addr;
- if ( addr == (pa + bytes) )
- vio->mmio_large_write_bytes += size;
- }
- else
- {
- paddr_t pa = vio->mmio_large_read_pa;
- unsigned int bytes = vio->mmio_large_read_bytes;
- if ( bytes == 0 )
- pa = vio->mmio_large_read_pa = addr;
- if ( (addr == (pa + bytes)) &&
- ((bytes + size) <= sizeof(vio->mmio_large_read)) )
- {
- memcpy(&vio->mmio_large_read[bytes], p_data, size);
- vio->mmio_large_read_bytes += size;
- }
- }
- }
-
return X86EMUL_OKAY;
}
@@ -587,11 +537,12 @@ static int hvmemul_virtual_to_linear(
}
static int hvmemul_phys_mmio_access(
- paddr_t gpa, unsigned int size, uint8_t dir, uint8_t *buffer)
+ struct hvm_mmio_cache *cache, paddr_t gpa, unsigned int size, uint8_t dir,
+ uint8_t *buffer, unsigned int off)
{
unsigned long one_rep = 1;
unsigned int chunk;
- int rc;
+ int rc = X86EMUL_OKAY;
/* Accesses must fall within a page */
BUG_ON((gpa & (PAGE_SIZE - 1)) + size > PAGE_SIZE);
@@ -607,14 +558,33 @@ static int hvmemul_phys_mmio_access(
for ( ;; )
{
- rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
- buffer);
- if ( rc != X86EMUL_OKAY )
- break;
+ /* Have we already done this chunk? */
+ if ( off < cache->size )
+ {
+ ASSERT((off + chunk) <= cache->size);
+
+ if ( dir == IOREQ_READ )
+ memcpy(&buffer[off], &cache->buffer[off], chunk);
+ else if ( memcmp(&buffer[off], &cache->buffer[off], chunk) != 0 )
+ domain_crash(current->domain);
+ }
+ else
+ {
+ ASSERT(off == cache->size);
+
+ rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
+ &buffer[off]);
+ if ( rc != X86EMUL_OKAY )
+ break;
+
+ /* Note that we have now done this chunk. */
+ memcpy(&cache->buffer[off], &buffer[off], chunk);
+ cache->size += chunk;
+ }
/* Advance to the next chunk */
gpa += chunk;
- buffer += chunk;
+ off += chunk;
size -= chunk;
if ( size == 0 )
@@ -631,13 +601,41 @@ static int hvmemul_phys_mmio_access(
return rc;
}
+static struct hvm_mmio_cache *hvmemul_find_mmio_cache(
+ struct hvm_vcpu_io *vio, unsigned long gla, uint8_t dir)
+{
+ unsigned int i;
+ struct hvm_mmio_cache *cache;
+
+ for ( i = 0; i < vio->mmio_cache_count; i ++ )
+ {
+ cache = &vio->mmio_cache[i];
+
+ if ( gla == cache->gla &&
+ dir == cache->dir )
+ return cache;
+ }
+
+ i = vio->mmio_cache_count++;
+ BUG_ON(i == ARRAY_SIZE(vio->mmio_cache));
+
+ cache = &vio->mmio_cache[i];
+ memset(cache, 0, sizeof (*cache));
+
+ cache->gla = gla;
+ cache->dir = dir;
+
+ return cache;
+}
+
static int hvmemul_linear_mmio_access(
- unsigned long gla, unsigned int size, uint8_t dir, uint8_t *buffer,
+ unsigned long gla, unsigned int size, uint8_t dir, void *buffer,
uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt, bool_t known_gpfn)
{
struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
unsigned long page_off = gla & (PAGE_SIZE - 1);
- unsigned int chunk;
+ struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(vio, gla, dir);
+ unsigned int chunk, buffer_off = 0;
paddr_t gpa;
unsigned long one_rep = 1;
int rc;
@@ -656,12 +654,12 @@ static int hvmemul_linear_mmio_access(
for ( ;; )
{
- rc = hvmemul_phys_mmio_access(gpa, chunk, dir, buffer);
+ rc = hvmemul_phys_mmio_access(cache, gpa, chunk, dir, buffer, buffer_off);
if ( rc != X86EMUL_OKAY )
break;
gla += chunk;
- buffer += chunk;
+ buffer_off += chunk;
size -= chunk;
if ( size == 0 )
@@ -1609,7 +1607,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
rc = X86EMUL_RETRY;
if ( rc != X86EMUL_RETRY )
{
- vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
+ vio->mmio_cache_count = 0;
vio->mmio_insn_bytes = 0;
}
else
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 506ce89..90dea8d 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -42,6 +42,17 @@ struct hvm_vcpu_asid {
uint32_t asid;
};
+/*
+ * We may read or write up to m256 as a number of device-model
+ * transactions.
+ */
+struct hvm_mmio_cache {
+ unsigned long size;
+ uint8_t buffer[32];
+ unsigned long gla;
+ uint8_t dir;
+};
+
struct hvm_vcpu_io {
/* I/O request in flight to device model. */
ioreq_t io_req;
@@ -57,13 +68,13 @@ struct hvm_vcpu_io {
unsigned long mmio_gva;
unsigned long mmio_gpfn;
- /* We may read up to m256 as a number of device-model transactions. */
- paddr_t mmio_large_read_pa;
- uint8_t mmio_large_read[32];
- unsigned int mmio_large_read_bytes;
- /* We may write up to m256 as a number of device-model transactions. */
- unsigned int mmio_large_write_bytes;
- paddr_t mmio_large_write_pa;
+ /*
+ * We may need to handle up to 3 distinct memory accesses per
+ * instruction.
+ */
+ struct hvm_mmio_cache mmio_cache[3];
+ unsigned int mmio_cache_count;
+
/* For retries we shouldn't re-fetch the instruction. */
unsigned int mmio_insn_bytes;
unsigned char mmio_insn[16];
--
1.7.10.4