From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Paul Durrant <paul.durrant@citrix.com>,
Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v6 12/16] x86/hvm: remove HVMIO_dispatched I/O state
Date: Fri, 3 Jul 2015 17:25:29 +0100 [thread overview]
Message-ID: <1435940733-20856-13-git-send-email-paul.durrant@citrix.com> (raw)
In-Reply-To: <1435940733-20856-1-git-send-email-paul.durrant@citrix.com>
By removing the HVMIO_dispatched state and making all pending emulations
(i.e. all those not handled by the hypervisor) use HVMIO_awaiting_completion,
various code-paths can be simplified.
The completion case for HVMIO_dispatched can also be trivially removed
from hvmemul_do_io() as it was already unreachable. This is because that
state was only ever used for writes or I/O to/from a guest page and
hvmemul_do_io() is never called to complete such I/O.
NOTE: There is one subtlety in handle_pio()... The only case when
handle_pio() got a return code of X86EMUL_RETRY back from
hvmemul_do_pio_buffer() and found io_state was not
HVMIO_awaiting_completion was in the case where the domain is
shutting down. This is because all writes normally yield a return
of X86EMUL_OKAY and all reads put io_state into
HVMIO_awaiting_completion. Hence the io_state check there is
replaced with a check of the is_shutting_down flag on the domain.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
xen/arch/x86/hvm/emulate.c | 12 +++---------
xen/arch/x86/hvm/hvm.c | 12 +++---------
xen/arch/x86/hvm/io.c | 14 +++++++-------
xen/arch/x86/hvm/vmx/realmode.c | 2 +-
xen/include/asm-x86/hvm/vcpu.h | 10 +++++++++-
5 files changed, 23 insertions(+), 27 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index d1775ac..c694bb1 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -137,20 +137,14 @@ static int hvmemul_do_io(
if ( data_is_addr || dir == IOREQ_WRITE )
return X86EMUL_UNHANDLEABLE;
goto finish_access;
- case HVMIO_dispatched:
- /* May have to wait for previous cycle of a multi-write to complete. */
- if ( is_mmio && !data_is_addr && (dir == IOREQ_WRITE) &&
- (addr == (vio->mmio_large_write_pa +
- vio->mmio_large_write_bytes)) )
- return X86EMUL_RETRY;
- /* fallthrough */
default:
return X86EMUL_UNHANDLEABLE;
}
- vio->io_state = (data_is_addr || dir == IOREQ_WRITE) ?
- HVMIO_dispatched : HVMIO_awaiting_completion;
+ vio->io_state = HVMIO_awaiting_completion;
vio->io_size = size;
+ vio->io_dir = dir;
+ vio->io_data_is_addr = data_is_addr;
if ( dir == IOREQ_WRITE )
{
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 98d1536..4aa7ed2 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -416,22 +416,16 @@ static void hvm_io_assist(ioreq_t *p)
{
struct vcpu *curr = current;
struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
- enum hvm_io_state io_state;
p->state = STATE_IOREQ_NONE;
- io_state = vio->io_state;
- vio->io_state = HVMIO_none;
-
- switch ( io_state )
+ if ( hvm_vcpu_io_need_completion(vio) )
{
- case HVMIO_awaiting_completion:
vio->io_state = HVMIO_completed;
vio->io_data = p->data;
- break;
- default:
- break;
}
+ else
+ vio->io_state = HVMIO_none;
msix_write_completion(curr);
vcpu_end_shutdown_deferral(curr);
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 221d05e..3b51d59 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -90,9 +90,7 @@ int handle_mmio(void)
rc = hvm_emulate_one(&ctxt);
- if ( rc != X86EMUL_RETRY )
- vio->io_state = HVMIO_none;
- if ( vio->io_state == HVMIO_awaiting_completion || vio->mmio_retry )
+ if ( hvm_vcpu_io_need_completion(vio) || vio->mmio_retry )
vio->io_completion = HVMIO_mmio_completion;
else
vio->mmio_access = (struct npfec){};
@@ -142,6 +140,9 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
rc = hvmemul_do_pio_buffer(port, size, dir, &data);
+ if ( hvm_vcpu_io_need_completion(vio) )
+ vio->io_completion = HVMIO_pio_completion;
+
switch ( rc )
{
case X86EMUL_OKAY:
@@ -154,11 +155,10 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
}
break;
case X86EMUL_RETRY:
- if ( vio->io_state != HVMIO_awaiting_completion )
+ /* We should not advance RIP/EIP if the domain is shutting down */
+ if ( curr->domain->is_shutting_down )
return 0;
- /* Completion in hvm_io_assist() with no re-emulation required. */
- ASSERT(dir == IOREQ_READ);
- vio->io_completion = HVMIO_pio_completion;
+
break;
default:
gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 76ff9a5..deb53ae 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -111,7 +111,7 @@ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
rc = hvm_emulate_one(hvmemul_ctxt);
- if ( vio->io_state == HVMIO_awaiting_completion || vio->mmio_retry )
+ if ( hvm_vcpu_io_need_completion(vio) || vio->mmio_retry )
vio->io_completion = HVMIO_realmode_completion;
if ( rc == X86EMUL_UNHANDLEABLE )
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index dbdee14..de8cdd7 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -32,7 +32,6 @@
enum hvm_io_state {
HVMIO_none = 0,
- HVMIO_dispatched,
HVMIO_awaiting_completion,
HVMIO_completed
};
@@ -55,6 +54,8 @@ struct hvm_vcpu_io {
unsigned long io_data;
unsigned int io_size;
enum hvm_io_completion io_completion;
+ uint8_t io_dir;
+ uint8_t io_data_is_addr;
/*
* HVM emulation:
@@ -87,6 +88,13 @@ struct hvm_vcpu_io {
const struct g2m_ioport *g2m_ioport;
};
+static inline bool_t hvm_vcpu_io_need_completion(const struct hvm_vcpu_io *vio)
+{
+ return (vio->io_state == HVMIO_awaiting_completion) &&
+ !vio->io_data_is_addr &&
+ (vio->io_dir == IOREQ_READ);
+}
+
#define VMCX_EADDR (~0ULL)
struct nestedvcpu {
--
1.7.10.4
next prev parent reply other threads:[~2015-07-03 16:45 UTC|newest]
Thread overview: 53+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-07-03 16:25 [PATCH v6 00/16] x86/hvm: I/O emulation cleanup and fix Paul Durrant
2015-07-03 16:25 ` [PATCH v6 01/16] x86/hvm: make sure emulation is retried if domain is shutting down Paul Durrant
2015-07-03 16:25 ` [PATCH v6 02/16] x86/hvm: remove multiple open coded 'chunking' loops Paul Durrant
2015-07-03 17:17 ` Andrew Cooper
2015-07-08 15:52 ` Jan Beulich
2015-07-08 15:57 ` Paul Durrant
2015-07-08 16:18 ` Jan Beulich
2015-07-08 16:43 ` Andrew Cooper
2015-07-09 6:53 ` Jan Beulich
2015-07-09 8:15 ` Paul Durrant
2015-07-09 9:19 ` Jan Beulich
2015-07-03 16:25 ` [PATCH v6 03/16] x86/hvm: change hvm_mmio_read_t and hvm_mmio_write_t length argument Paul Durrant
2015-07-03 16:25 ` [PATCH v6 04/16] x86/hvm: restrict port numbers to uint16_t and sizes to unsigned int Paul Durrant
2015-07-08 15:57 ` Jan Beulich
2015-07-08 15:59 ` Paul Durrant
2015-07-03 16:25 ` [PATCH v6 05/16] x86/hvm: unify internal portio and mmio intercepts Paul Durrant
2015-07-08 16:11 ` Jan Beulich
2015-07-08 16:28 ` Jan Beulich
2015-07-03 16:25 ` [PATCH v6 06/16] x86/hvm: add length to mmio check op Paul Durrant
2015-07-03 16:25 ` [PATCH v6 07/16] x86/hvm: unify dpci portio intercept with standard portio intercept Paul Durrant
2015-07-08 16:29 ` Jan Beulich
2015-07-03 16:25 ` [PATCH v6 08/16] x86/hvm: unify stdvga mmio intercept with standard mmio intercept Paul Durrant
2015-07-08 16:17 ` Jan Beulich
2015-07-09 9:40 ` Paul Durrant
2015-07-09 8:53 ` Jan Beulich
2015-07-09 9:00 ` Paul Durrant
2015-07-09 9:21 ` Jan Beulich
2015-07-09 9:17 ` Jan Beulich
2015-07-03 16:25 ` [PATCH v6 09/16] x86/hvm: limit reps to avoid the need to handle retry Paul Durrant
2015-07-03 17:18 ` Andrew Cooper
2015-07-09 10:05 ` Jan Beulich
2015-07-09 11:11 ` Paul Durrant
2015-07-09 12:04 ` Jan Beulich
2015-07-09 12:50 ` Paul Durrant
2015-07-09 13:38 ` Jan Beulich
2015-07-09 13:42 ` Paul Durrant
2015-07-09 14:00 ` Paul Durrant
2015-07-09 14:19 ` Jan Beulich
2015-07-03 16:25 ` [PATCH v6 10/16] x86/hvm: only call hvm_io_assist() from hvm_wait_for_io() Paul Durrant
2015-07-03 16:25 ` [PATCH v6 11/16] x86/hvm: split I/O completion handling from state model Paul Durrant
2015-07-09 10:09 ` Jan Beulich
2015-07-03 16:25 ` Paul Durrant [this message]
2015-07-09 10:13 ` [PATCH v6 12/16] x86/hvm: remove HVMIO_dispatched I/O state Jan Beulich
2015-07-03 16:25 ` [PATCH v6 13/16] x86/hvm: remove hvm_io_state enumeration Paul Durrant
2015-07-03 16:25 ` [PATCH v6 14/16] x86/hvm: use ioreq_t to track in-flight state Paul Durrant
2015-07-03 16:25 ` [PATCH v6 15/16] x86/hvm: always re-emulate I/O from a buffer Paul Durrant
2015-07-03 16:25 ` [PATCH v6 16/16] x86/hvm: track large memory mapped accesses by buffer offset Paul Durrant
2015-07-09 10:33 ` Jan Beulich
2015-07-09 10:36 ` Paul Durrant
2015-07-09 10:34 ` Jan Beulich
2015-07-08 15:44 ` [PATCH v6 00/16] x86/hvm: I/O emulation cleanup and fix Jan Beulich
2015-07-09 11:31 ` Paul Durrant
2015-07-09 11:43 ` David Vrabel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1435940733-20856-13-git-send-email-paul.durrant@citrix.com \
--to=paul.durrant@citrix.com \
--cc=jbeulich@suse.com \
--cc=keir@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).