From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Paul Durrant <paul.durrant@citrix.com>,
Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v6 09/16] x86/hvm: limit reps to avoid the need to handle retry
Date: Fri, 3 Jul 2015 17:25:26 +0100
Message-ID: <1435940733-20856-10-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1435940733-20856-1-git-send-email-paul.durrant@citrix.com>
By limiting hvmemul_do_io_addr() to reps falling within the page on which
a reference has already been taken, we can guarantee that calls to
hvm_copy_to/from_guest_phys() will not hit the HVMCOPY_gfn_paged_out
or HVMCOPY_gfn_shared cases. Thus we can remove the retry logic (added
by c/s 82ed8716b "fix direct PCI port I/O emulation retry and error
handling") from the intercept code and simplify it significantly.
Normally hvmemul_do_io_addr() will only reference a single page at a time.
It will, however, take an extra page reference for I/O spanning a page
boundary.
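As an informal illustration (this is a standalone sketch, not part of the
patch; clamp_reps() and the example values are invented for clarity), the
per-page rep clamp works out as follows:

    /* Standalone illustration of the per-page rep clamp applied by
     * hvmemul_do_io_addr(): how many 'size'-byte reps fit between
     * 'gpa' and the boundary of its 4k page, given direction 'df'. */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long clamp_reps(unsigned long gpa, unsigned int size,
                                    int df, unsigned long reps)
    {
        unsigned long off = gpa & (PAGE_SIZE - 1);
        unsigned long fit = df ? (off + size - 1) / size
                               : (PAGE_SIZE - off) / size;

        return reps < fit ? reps : fit;
    }

    int main(void)
    {
        /* A 4-byte rep starting 8 bytes before the end of a page,
         * with addresses incrementing: only 2 of the 100 requested
         * reps fit within this page. */
        printf("%lu\n", clamp_reps(0x10ff8, 4, 0, 100)); /* prints 2 */
        return 0;
    }

When the clamp yields zero the access itself straddles the page boundary;
in that case the extra reference to the adjacent page is taken and exactly
one rep is emulated.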
It is still important to know, upon returning from x86_emulate(), whether
the number of reps was reduced, so the mmio_retry flag is retained for that
purpose.
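In outline (simplified from the emulate.c and hvm.c hunks below), the flag
is driven like this:

    rc = hvmemul_do_io(is_mmio, addr, count, size, dir, df, 1, ram_gpa);
    if ( rc == X86EMUL_OKAY )
    {
        /* Record that fewer reps were issued than were requested;
         * hvm_do_resume() re-enters handle_mmio() to continue the
         * instruction. */
        v->arch.hvm_vcpu.hvm_io.mmio_retry = (count < *reps);
        *reps = count;
    }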
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
xen/arch/x86/hvm/emulate.c | 76 ++++++++++++++++++++++++++--------------
xen/arch/x86/hvm/hvm.c | 4 +++
xen/arch/x86/hvm/intercept.c | 57 +++++++-----------------------
xen/include/asm-x86/hvm/vcpu.h | 2 +-
4 files changed, 66 insertions(+), 73 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 1afd626..c80c488 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -51,7 +51,7 @@ static void hvmtrace_io_assist(const ioreq_t *p)
}
static int hvmemul_do_io(
- bool_t is_mmio, paddr_t addr, unsigned long *reps, unsigned int size,
+ bool_t is_mmio, paddr_t addr, unsigned long reps, unsigned int size,
uint8_t dir, bool_t df, bool_t data_is_addr, uintptr_t data)
{
struct vcpu *curr = current;
@@ -60,6 +60,7 @@ static int hvmemul_do_io(
.type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
.addr = addr,
.size = size,
+ .count = reps,
.dir = dir,
.df = df,
.data = data,
@@ -125,15 +126,6 @@ static int hvmemul_do_io(
HVMIO_dispatched : HVMIO_awaiting_completion;
vio->io_size = size;
- /*
- * When retrying a repeated string instruction, force exit to guest after
- * completion of the retried iteration to allow handling of interrupts.
- */
- if ( vio->mmio_retrying )
- *reps = 1;
-
- p.count = *reps;
-
if ( dir == IOREQ_WRITE )
{
if ( !data_is_addr )
@@ -147,17 +139,9 @@ static int hvmemul_do_io(
switch ( rc )
{
case X86EMUL_OKAY:
- case X86EMUL_RETRY:
- *reps = p.count;
p.state = STATE_IORESP_READY;
- if ( !vio->mmio_retry )
- {
- hvm_io_assist(&p);
- vio->io_state = HVMIO_none;
- }
- else
- /* Defer hvm_io_assist() invocation to hvm_do_resume(). */
- vio->io_state = HVMIO_handle_mmio_awaiting_completion;
+ hvm_io_assist(&p);
+ vio->io_state = HVMIO_none;
break;
case X86EMUL_UNHANDLEABLE:
{
@@ -235,7 +219,7 @@ static int hvmemul_do_io_buffer(
BUG_ON(buffer == NULL);
- rc = hvmemul_do_io(is_mmio, addr, reps, size, dir, df, 0,
+ rc = hvmemul_do_io(is_mmio, addr, *reps, size, dir, df, 0,
(uintptr_t)buffer);
if ( rc == X86EMUL_UNHANDLEABLE && dir == IOREQ_READ )
memset(buffer, 0xff, size);
@@ -287,17 +271,56 @@ static int hvmemul_do_io_addr(
bool_t is_mmio, paddr_t addr, unsigned long *reps,
unsigned int size, uint8_t dir, bool_t df, paddr_t ram_gpa)
{
- struct page_info *ram_page;
+ struct vcpu *v = current;
+ unsigned long ram_gmfn = paddr_to_pfn(ram_gpa);
+ unsigned int page_off = ram_gpa & (PAGE_SIZE - 1);
+ struct page_info *ram_page[2];
+ int nr_pages = 0;
+ unsigned long count;
int rc;
- rc = hvmemul_acquire_page(paddr_to_pfn(ram_gpa), &ram_page);
+ rc = hvmemul_acquire_page(ram_gmfn, &ram_page[nr_pages]);
if ( rc != X86EMUL_OKAY )
- return rc;
+ goto out;
- rc = hvmemul_do_io(is_mmio, addr, reps, size, dir, df, 1,
+ nr_pages++;
+
+ /* Determine how many reps will fit within this page */
+ count = min_t(unsigned long,
+ *reps,
+ df ?
+ (page_off + size - 1) / size :
+ (PAGE_SIZE - page_off) / size);
+
+ if ( count == 0 )
+ {
+ /*
+ * This access must span two pages, so grab a reference to
+ * the next page and do a single rep.
+ * It is safe to assume multiple pages are physically
+ * contiguous at this point as hvmemul_linear_to_phys() will
+ * ensure this is the case.
+ */
+ rc = hvmemul_acquire_page(df ? ram_gmfn - 1 : ram_gmfn + 1,
+ &ram_page[nr_pages]);
+ if ( rc != X86EMUL_OKAY )
+ goto out;
+
+ nr_pages++;
+ count = 1;
+ }
+
+ rc = hvmemul_do_io(is_mmio, addr, count, size, dir, df, 1,
ram_gpa);
+ if ( rc == X86EMUL_OKAY )
+ {
+ v->arch.hvm_vcpu.hvm_io.mmio_retry = (count < *reps);
+ *reps = count;
+ }
- hvmemul_release_page(ram_page);
+ out:
+ while ( --nr_pages >= 0 )
+ hvmemul_release_page(ram_page[nr_pages]);
return rc;
}
@@ -1547,7 +1570,6 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
}
hvmemul_ctxt->exn_pending = 0;
- vio->mmio_retrying = vio->mmio_retry;
vio->mmio_retry = 0;
if ( cpu_has_vmx )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c25b001..810e579 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -440,6 +440,7 @@ static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
void hvm_do_resume(struct vcpu *v)
{
+ struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
struct domain *d = v->domain;
struct hvm_ioreq_server *s;
@@ -468,6 +469,9 @@ void hvm_do_resume(struct vcpu *v)
}
}
+ if ( vio->mmio_retry )
+ (void)handle_mmio();
+
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 78e2f57..20bb4ea 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -123,8 +123,6 @@ static const struct hvm_io_ops portio_ops = {
int hvm_process_io_intercept(const struct hvm_io_handler *handler,
ioreq_t *p)
{
- struct vcpu *curr = current;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
const struct hvm_io_ops *ops = handler->ops;
int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
uint64_t data;
@@ -134,23 +132,12 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
{
for ( i = 0; i < p->count; i++ )
{
- if ( vio->mmio_retrying )
- {
- if ( vio->mmio_large_read_bytes != p->size )
- return X86EMUL_UNHANDLEABLE;
- memcpy(&data, vio->mmio_large_read, p->size);
- vio->mmio_large_read_bytes = 0;
- vio->mmio_retrying = 0;
- }
- else
- {
- addr = (p->type == IOREQ_TYPE_COPY) ?
- p->addr + step * i :
- p->addr;
- rc = ops->read(handler, addr, p->size, &data);
- if ( rc != X86EMUL_OKAY )
- break;
- }
+ addr = (p->type == IOREQ_TYPE_COPY) ?
+ p->addr + step * i :
+ p->addr;
+ rc = ops->read(handler, addr, p->size, &data);
+ if ( rc != X86EMUL_OKAY )
+ break;
if ( p->data_is_ptr )
{
@@ -159,14 +146,12 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
{
case HVMCOPY_okay:
break;
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
- rc = X86EMUL_RETRY;
- break;
case HVMCOPY_bad_gfn_to_mfn:
/* Drop the write as real hardware would. */
continue;
case HVMCOPY_bad_gva_to_gfn:
+ case HVMCOPY_gfn_paged_out:
+ case HVMCOPY_gfn_shared:
ASSERT_UNREACHABLE();
/* fall through */
default:
@@ -179,13 +164,6 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
else
p->data = data;
}
-
- if ( rc == X86EMUL_RETRY )
- {
- vio->mmio_retry = 1;
- vio->mmio_large_read_bytes = p->size;
- memcpy(vio->mmio_large_read, &data, p->size);
- }
}
else /* p->dir == IOREQ_WRITE */
{
@@ -198,14 +176,12 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
{
case HVMCOPY_okay:
break;
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
- rc = X86EMUL_RETRY;
- break;
case HVMCOPY_bad_gfn_to_mfn:
data = ~0;
break;
case HVMCOPY_bad_gva_to_gfn:
+ case HVMCOPY_gfn_paged_out:
+ case HVMCOPY_gfn_shared:
ASSERT_UNREACHABLE();
/* fall through */
default:
@@ -225,19 +201,10 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
if ( rc != X86EMUL_OKAY )
break;
}
-
- if ( rc == X86EMUL_RETRY )
- vio->mmio_retry = 1;
}
- if ( i != 0 )
- {
- if ( rc == X86EMUL_UNHANDLEABLE )
- domain_crash(curr->domain);
-
- p->count = i;
- rc = X86EMUL_OKAY;
- }
+ if ( i != 0 && rc == X86EMUL_UNHANDLEABLE )
+ domain_crash(current->domain);
return rc;
}
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index b15c1e6..2ed0606 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -74,7 +74,7 @@ struct hvm_vcpu_io {
* For string instruction emulation we need to be able to signal a
* necessary retry through other than function return codes.
*/
- bool_t mmio_retry, mmio_retrying;
+ bool_t mmio_retry;
unsigned long msix_unmask_address;
--
1.7.10.4