From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Paul Durrant <paul.durrant@citrix.com>,
Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v6 07/16] x86/hvm: unify dpci portio intercept with standard portio intercept
Date: Fri, 3 Jul 2015 17:25:24 +0100 [thread overview]
Message-ID: <1435940733-20856-8-git-send-email-paul.durrant@citrix.com> (raw)
In-Reply-To: <1435940733-20856-1-git-send-email-paul.durrant@citrix.com>
This patch re-works the dpci portio intercepts so that they can be unified
with standard portio handling thereby removing a substantial amount of
code duplication.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
xen/arch/x86/hvm/hvm.c | 2 +
xen/arch/x86/hvm/intercept.c | 20 +---
xen/arch/x86/hvm/io.c | 220 ++++++++++++----------------------------
xen/include/asm-x86/hvm/io.h | 4 +
xen/include/asm-x86/hvm/vcpu.h | 2 +
xen/include/xen/iommu.h | 1 -
6 files changed, 78 insertions(+), 171 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 676f294..c25b001 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1483,6 +1483,8 @@ int hvm_domain_initialise(struct domain *d)
else
d->arch.hvm_domain.io_bitmap = hvm_io_bitmap;
+ register_dpci_portio_handler(d);
+
if ( is_pvh_domain(d) )
{
register_portio_handler(d, 0, 0x10003, handle_pvh_io);
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index ccef38e..91968c4 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -125,10 +125,7 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
{
struct vcpu *curr = current;
struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
- const struct hvm_io_ops *ops =
- (p->type == IOREQ_TYPE_COPY) ?
- &mmio_ops :
- &portio_ops;
+ const struct hvm_io_ops *ops = handler->ops;
int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
uint64_t data;
uint64_t addr;
@@ -248,10 +245,6 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
const struct hvm_io_handler *hvm_find_io_handler(ioreq_t *p)
{
struct domain *curr_d = current->domain;
- const struct hvm_io_ops *ops =
- (p->type == IOREQ_TYPE_COPY) ?
- &mmio_ops :
- &portio_ops;
unsigned int i;
BUG_ON((p->type != IOREQ_TYPE_PIO) &&
@@ -261,6 +254,7 @@ const struct hvm_io_handler *hvm_find_io_handler(ioreq_t *p)
{
const struct hvm_io_handler *handler =
&curr_d->arch.hvm_domain.io_handler[i];
+ const struct hvm_io_ops *ops = handler->ops;
if ( handler->type != p->type )
continue;
@@ -276,13 +270,7 @@ int hvm_io_intercept(ioreq_t *p)
{
const struct hvm_io_handler *handler;
- if ( p->type == IOREQ_TYPE_PIO )
- {
- int rc = dpci_ioport_intercept(p);
- if ( (rc == X86EMUL_OKAY) || (rc == X86EMUL_RETRY) )
- return rc;
- }
- else if ( p->type == IOREQ_TYPE_COPY )
+ if ( p->type == IOREQ_TYPE_COPY )
{
int rc = stdvga_intercept_mmio(p);
if ( (rc == X86EMUL_OKAY) || (rc == X86EMUL_RETRY) )
@@ -316,6 +304,7 @@ void register_mmio_handler(struct domain *d,
struct hvm_io_handler *handler = hvm_next_io_handler(d);
handler->type = IOREQ_TYPE_COPY;
+ handler->ops = &mmio_ops;
handler->mmio.ops = ops;
}
@@ -325,6 +314,7 @@ void register_portio_handler(struct domain *d, uint16_t port,
struct hvm_io_handler *handler = hvm_next_io_handler(d);
handler->type = IOREQ_TYPE_PIO;
+ handler->ops = &portio_ops;
handler->portio.start = port;
handler->portio.end = port + size;
handler->portio.action = action;
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index c0964ec..2c88ddb 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -208,185 +208,95 @@ void hvm_io_assist(ioreq_t *p)
}
}
-static int dpci_ioport_read(uint32_t mport, ioreq_t *p)
+static bool_t dpci_portio_accept(const struct hvm_io_handler *handler,
+ const ioreq_t *p)
{
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
- int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
- uint32_t data = 0;
+ struct vcpu *curr = current;
+ struct hvm_iommu *hd = domain_hvm_iommu(curr->domain);
+ struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct g2m_ioport *g2m_ioport;
+ unsigned int start, end;
- for ( i = 0; i < p->count; i++ )
+ list_for_each_entry( g2m_ioport, &hd->arch.g2m_ioport_list, list )
{
- if ( vio->mmio_retrying )
- {
- if ( vio->mmio_large_read_bytes != p->size )
- return X86EMUL_UNHANDLEABLE;
- memcpy(&data, vio->mmio_large_read, p->size);
- vio->mmio_large_read_bytes = 0;
- vio->mmio_retrying = 0;
- }
- else switch ( p->size )
+ start = g2m_ioport->gport;
+ end = start + g2m_ioport->np;
+ if ( (p->addr >= start) && (p->addr + p->size <= end) )
{
- case 1:
- data = inb(mport);
- break;
- case 2:
- data = inw(mport);
- break;
- case 4:
- data = inl(mport);
- break;
- default:
- BUG();
+ vio->g2m_ioport = g2m_ioport;
+ return 1;
}
-
- if ( p->data_is_ptr )
- {
- switch ( hvm_copy_to_guest_phys(p->data + step * i,
- &data, p->size) )
- {
- case HVMCOPY_okay:
- break;
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
- rc = X86EMUL_RETRY;
- break;
- case HVMCOPY_bad_gfn_to_mfn:
- /* Drop the write as real hardware would. */
- continue;
- case HVMCOPY_bad_gva_to_gfn:
- ASSERT(0);
- /* fall through */
- default:
- rc = X86EMUL_UNHANDLEABLE;
- break;
- }
- if ( rc != X86EMUL_OKAY)
- break;
- }
- else
- p->data = data;
}
- if ( rc == X86EMUL_RETRY )
- {
- vio->mmio_retry = 1;
- vio->mmio_large_read_bytes = p->size;
- memcpy(vio->mmio_large_read, &data, p->size);
- }
-
- if ( i != 0 )
- {
- p->count = i;
- rc = X86EMUL_OKAY;
- }
-
- return rc;
+ return 0;
}
-static int dpci_ioport_write(uint32_t mport, ioreq_t *p)
+static int dpci_portio_read(const struct hvm_io_handler *handler,
+ uint64_t addr,
+ uint32_t size,
+ uint64_t *data)
{
- int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
- uint32_t data;
-
- for ( i = 0; i < p->count; i++ )
- {
- data = p->data;
- if ( p->data_is_ptr )
- {
- switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
- p->size) )
- {
- case HVMCOPY_okay:
- break;
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
- rc = X86EMUL_RETRY;
- break;
- case HVMCOPY_bad_gfn_to_mfn:
- data = ~0;
- break;
- case HVMCOPY_bad_gva_to_gfn:
- ASSERT(0);
- /* fall through */
- default:
- rc = X86EMUL_UNHANDLEABLE;
- break;
- }
- if ( rc != X86EMUL_OKAY)
- break;
- }
-
- switch ( p->size )
- {
- case 1:
- outb(data, mport);
- break;
- case 2:
- outw(data, mport);
- break;
- case 4:
- outl(data, mport);
- break;
- default:
- BUG();
- }
- }
-
- if ( rc == X86EMUL_RETRY )
- current->arch.hvm_vcpu.hvm_io.mmio_retry = 1;
+ struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ const struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
+ unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;
- if ( i != 0 )
+ switch ( size )
{
- p->count = i;
- rc = X86EMUL_OKAY;
+ case 1:
+ *data = inb(mport);
+ break;
+ case 2:
+ *data = inw(mport);
+ break;
+ case 4:
+ *data = inl(mport);
+ break;
+ default:
+ BUG();
}
- return rc;
+ return X86EMUL_OKAY;
}
-int dpci_ioport_intercept(ioreq_t *p)
+static int dpci_portio_write(const struct hvm_io_handler *handler,
+ uint64_t addr,
+ uint32_t size,
+ uint64_t data)
{
- struct domain *d = current->domain;
- struct hvm_iommu *hd = domain_hvm_iommu(d);
- struct g2m_ioport *g2m_ioport;
- unsigned int mport, gport = p->addr;
- unsigned int s = 0, e = 0;
- int rc;
-
- list_for_each_entry( g2m_ioport, &hd->arch.g2m_ioport_list, list )
- {
- s = g2m_ioport->gport;
- e = s + g2m_ioport->np;
- if ( (gport >= s) && (gport < e) )
- goto found;
- }
-
- return X86EMUL_UNHANDLEABLE;
-
- found:
- mport = (gport - s) + g2m_ioport->mport;
-
- if ( !ioports_access_permitted(d, mport, mport + p->size - 1) )
- {
- gdprintk(XENLOG_ERR, "Error: access to gport=%#x denied!\n",
- (uint32_t)p->addr);
- return X86EMUL_UNHANDLEABLE;
- }
+ struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ const struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
+ unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;
- switch ( p->dir )
+ switch ( size )
{
- case IOREQ_READ:
- rc = dpci_ioport_read(mport, p);
+ case 1:
+ outb(data, mport);
break;
- case IOREQ_WRITE:
- rc = dpci_ioport_write(mport, p);
+ case 2:
+ outw(data, mport);
+ break;
+ case 4:
+ outl(data, mport);
break;
default:
- gdprintk(XENLOG_ERR, "Error: couldn't handle p->dir = %d", p->dir);
- rc = X86EMUL_UNHANDLEABLE;
+ BUG();
}
- return rc;
+ return X86EMUL_OKAY;
+}
+
+static const struct hvm_io_ops dpci_portio_ops = {
+ .accept = dpci_portio_accept,
+ .read = dpci_portio_read,
+ .write = dpci_portio_write
+};
+
+void register_dpci_portio_handler(struct domain *d)
+{
+ struct hvm_io_handler *handler = hvm_next_io_handler(d);
+
+ handler->type = IOREQ_TYPE_PIO;
+ handler->ops = &dpci_portio_ops;
}
/*
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 4fc2336..9503e6e 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -72,6 +72,7 @@ struct hvm_io_handler {
portio_action_t action;
} portio;
};
+ const struct hvm_io_ops *ops;
uint8_t type;
};
@@ -144,6 +145,9 @@ int stdvga_intercept_mmio(ioreq_t *p);
void stdvga_deinit(struct domain *d);
extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
+
+void register_dpci_portio_handler(struct domain *d);
+
#endif /* __ASM_X86_HVM_IO_H__ */
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 3d8f4dc..b15c1e6 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -77,6 +77,8 @@ struct hvm_vcpu_io {
bool_t mmio_retry, mmio_retrying;
unsigned long msix_unmask_address;
+
+ const struct g2m_ioport *g2m_ioport;
};
#define VMCX_EADDR (~0ULL)
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index b30bf41..1d00696 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -93,7 +93,6 @@ void pt_pci_init(void);
struct pirq;
int hvm_do_IRQ_dpci(struct domain *, struct pirq *);
-int dpci_ioport_intercept(ioreq_t *p);
int pt_irq_create_bind(struct domain *, xen_domctl_bind_pt_irq_t *);
int pt_irq_destroy_bind(struct domain *, xen_domctl_bind_pt_irq_t *);
--
1.7.10.4
next prev parent reply other threads:[~2015-07-03 16:25 UTC|newest]
Thread overview: 53+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-07-03 16:25 [PATCH v6 00/16] x86/hvm: I/O emulation cleanup and fix Paul Durrant
2015-07-03 16:25 ` [PATCH v6 01/16] x86/hvm: make sure emulation is retried if domain is shutting down Paul Durrant
2015-07-03 16:25 ` [PATCH v6 02/16] x86/hvm: remove multiple open coded 'chunking' loops Paul Durrant
2015-07-03 17:17 ` Andrew Cooper
2015-07-08 15:52 ` Jan Beulich
2015-07-08 15:57 ` Paul Durrant
2015-07-08 16:18 ` Jan Beulich
2015-07-08 16:43 ` Andrew Cooper
2015-07-09 6:53 ` Jan Beulich
2015-07-09 8:15 ` Paul Durrant
2015-07-09 9:19 ` Jan Beulich
2015-07-03 16:25 ` [PATCH v6 03/16] x86/hvm: change hvm_mmio_read_t and hvm_mmio_write_t length argument Paul Durrant
2015-07-03 16:25 ` [PATCH v6 04/16] x86/hvm: restrict port numbers to uint16_t and sizes to unsigned int Paul Durrant
2015-07-08 15:57 ` Jan Beulich
2015-07-08 15:59 ` Paul Durrant
2015-07-03 16:25 ` [PATCH v6 05/16] x86/hvm: unify internal portio and mmio intercepts Paul Durrant
2015-07-08 16:11 ` Jan Beulich
2015-07-08 16:28 ` Jan Beulich
2015-07-03 16:25 ` [PATCH v6 06/16] x86/hvm: add length to mmio check op Paul Durrant
2015-07-03 16:25 ` Paul Durrant [this message]
2015-07-08 16:29 ` [PATCH v6 07/16] x86/hvm: unify dpci portio intercept with standard portio intercept Jan Beulich
2015-07-03 16:25 ` [PATCH v6 08/16] x86/hvm: unify stdvga mmio intercept with standard mmio intercept Paul Durrant
2015-07-08 16:17 ` Jan Beulich
2015-07-09 9:40 ` Paul Durrant
2015-07-09 8:53 ` Jan Beulich
2015-07-09 9:00 ` Paul Durrant
2015-07-09 9:21 ` Jan Beulich
2015-07-09 9:17 ` Jan Beulich
2015-07-03 16:25 ` [PATCH v6 09/16] x86/hvm: limit reps to avoid the need to handle retry Paul Durrant
2015-07-03 17:18 ` Andrew Cooper
2015-07-09 10:05 ` Jan Beulich
2015-07-09 11:11 ` Paul Durrant
2015-07-09 12:04 ` Jan Beulich
2015-07-09 12:50 ` Paul Durrant
2015-07-09 13:38 ` Jan Beulich
2015-07-09 13:42 ` Paul Durrant
2015-07-09 14:00 ` Paul Durrant
2015-07-09 14:19 ` Jan Beulich
2015-07-03 16:25 ` [PATCH v6 10/16] x86/hvm: only call hvm_io_assist() from hvm_wait_for_io() Paul Durrant
2015-07-03 16:25 ` [PATCH v6 11/16] x86/hvm: split I/O completion handling from state model Paul Durrant
2015-07-09 10:09 ` Jan Beulich
2015-07-03 16:25 ` [PATCH v6 12/16] x86/hvm: remove HVMIO_dispatched I/O state Paul Durrant
2015-07-09 10:13 ` Jan Beulich
2015-07-03 16:25 ` [PATCH v6 13/16] x86/hvm: remove hvm_io_state enumeration Paul Durrant
2015-07-03 16:25 ` [PATCH v6 14/16] x86/hvm: use ioreq_t to track in-flight state Paul Durrant
2015-07-03 16:25 ` [PATCH v6 15/16] x86/hvm: always re-emulate I/O from a buffer Paul Durrant
2015-07-03 16:25 ` [PATCH v6 16/16] x86/hvm: track large memory mapped accesses by buffer offset Paul Durrant
2015-07-09 10:33 ` Jan Beulich
2015-07-09 10:36 ` Paul Durrant
2015-07-09 10:34 ` Jan Beulich
2015-07-08 15:44 ` [PATCH v6 00/16] x86/hvm: I/O emulation cleanup and fix Jan Beulich
2015-07-09 11:31 ` Paul Durrant
2015-07-09 11:43 ` David Vrabel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1435940733-20856-8-git-send-email-paul.durrant@citrix.com \
--to=paul.durrant@citrix.com \
--cc=jbeulich@suse.com \
--cc=keir@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).