From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xen.org
Cc: Paul Durrant <paul.durrant@citrix.com>,
Ian Jackson <ian.jackson@eu.citrix.com>,
Jan Beulich <jbeulich@suse.com>,
Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Subject: [PATCH v7 9/9] ioreq-server: make buffered ioreq handling optional
Date: Fri, 9 May 2014 09:40:04 +0100 [thread overview]
Message-ID: <1399624804-5109-10-git-send-email-paul.durrant@citrix.com> (raw)
In-Reply-To: <1399624804-5109-1-git-send-email-paul.durrant@citrix.com>
Some emulators will only register regions that require non-buffered
access. (In practice the only region that a guest uses buffered access
for today is the VGA aperture from 0xa0000-0xbffff). This patch therefore
makes allocation of the buffered ioreq page and event channel optional for
secondary ioreq servers.
If a guest attempts buffered access to an ioreq server that does not
support it, the access will be handled via the normal synchronous path.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
---
tools/libxc/xc_domain.c | 2 ++
tools/libxc/xenctrl.h | 2 ++
xen/arch/x86/hvm/hvm.c | 75 +++++++++++++++++++++++++++------------
xen/include/public/hvm/hvm_op.h | 24 ++++++++-----
4 files changed, 71 insertions(+), 32 deletions(-)
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 17a8417..37ed141 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1286,6 +1286,7 @@ int xc_get_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long
int xc_hvm_create_ioreq_server(xc_interface *xch,
domid_t domid,
+ int handle_bufioreq,
ioservid_t *id)
{
DECLARE_HYPERCALL;
@@ -1301,6 +1302,7 @@ int xc_hvm_create_ioreq_server(xc_interface *xch,
hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
arg->domid = domid;
+ arg->handle_bufioreq = !!handle_bufioreq;
rc = do_xen_hypercall(xch, &hypercall);
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 2045084..400f0df 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1802,11 +1802,13 @@ int xc_get_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long
*
* @parm xch a handle to an open hypervisor interface.
* @parm domid the domain id to be serviced
+ * @parm handle_bufioreq should the IOREQ Server handle buffered requests?
* @parm id pointer to an ioservid_t to receive the IOREQ Server id.
* @return 0 on success, -1 on failure.
*/
int xc_hvm_create_ioreq_server(xc_interface *xch,
domid_t domid,
+ int handle_bufioreq,
ioservid_t *id);
/**
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 86daecd..67f6f47 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -668,7 +668,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
sv->ioreq_evtchn = rc;
- if ( v->vcpu_id == 0 )
+ if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
{
struct domain *d = s->domain;
@@ -719,7 +719,7 @@ static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s,
list_del(&sv->list_entry);
- if ( v->vcpu_id == 0 )
+ if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
free_xen_event_channel(v, s->bufioreq_evtchn);
free_xen_event_channel(v, sv->ioreq_evtchn);
@@ -746,7 +746,7 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
list_del(&sv->list_entry);
- if ( v->vcpu_id == 0 )
+ if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
free_xen_event_channel(v, s->bufioreq_evtchn);
free_xen_event_channel(v, sv->ioreq_evtchn);
@@ -758,7 +758,7 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
}
static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
- bool_t is_default)
+ bool_t is_default, bool_t handle_bufioreq)
{
struct domain *d = s->domain;
unsigned long ioreq_pfn, bufioreq_pfn;
@@ -766,24 +766,34 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
if ( is_default ) {
ioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+
+ /*
+ * The default ioreq server must handle buffered ioreqs, for
+ * backwards compatibility.
+ */
+ ASSERT(handle_bufioreq);
bufioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
} else {
rc = hvm_alloc_ioreq_gmfn(d, &ioreq_pfn);
if ( rc )
goto fail1;
- rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
- if ( rc )
- goto fail2;
+ if ( handle_bufioreq ) {
+ rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
+ if ( rc )
+ goto fail2;
+ }
}
rc = hvm_map_ioreq_page(s, 0, ioreq_pfn);
if ( rc )
goto fail3;
- rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
- if ( rc )
- goto fail4;
+ if ( handle_bufioreq ) {
+ rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
+ if ( rc )
+ goto fail4;
+ }
return 0;
@@ -791,7 +801,7 @@ fail4:
hvm_unmap_ioreq_page(s, 0);
fail3:
- if ( !is_default )
+ if ( !is_default && handle_bufioreq )
hvm_free_ioreq_gmfn(d, bufioreq_pfn);
fail2:
@@ -806,12 +816,17 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
bool_t is_default)
{
struct domain *d = s->domain;
+ bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
+
+ if ( handle_bufioreq )
+ hvm_unmap_ioreq_page(s, 1);
- hvm_unmap_ioreq_page(s, 1);
hvm_unmap_ioreq_page(s, 0);
if ( !is_default ) {
- hvm_free_ioreq_gmfn(d, s->bufioreq.gmfn);
+ if ( handle_bufioreq )
+ hvm_free_ioreq_gmfn(d, s->bufioreq.gmfn);
+
hvm_free_ioreq_gmfn(d, s->ioreq.gmfn);
}
}
@@ -875,6 +890,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
{
struct domain *d = s->domain;
struct hvm_ioreq_vcpu *sv;
+ bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
spin_lock(&s->lock);
@@ -884,7 +900,9 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
if ( !is_default )
{
hvm_remove_ioreq_gmfn(d, &s->ioreq);
- hvm_remove_ioreq_gmfn(d, &s->bufioreq);
+
+ if ( handle_bufioreq )
+ hvm_remove_ioreq_gmfn(d, &s->bufioreq);
}
s->enabled = 1;
@@ -902,6 +920,7 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
bool_t is_default)
{
struct domain *d = s->domain;
+ bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
spin_lock(&s->lock);
@@ -910,7 +929,9 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
if ( !is_default )
{
- hvm_add_ioreq_gmfn(d, &s->bufioreq);
+ if ( handle_bufioreq )
+ hvm_add_ioreq_gmfn(d, &s->bufioreq);
+
hvm_add_ioreq_gmfn(d, &s->ioreq);
}
@@ -922,7 +943,7 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
domid_t domid, bool_t is_default,
- ioservid_t id)
+ bool_t handle_bufioreq, ioservid_t id)
{
struct vcpu *v;
int rc;
@@ -939,7 +960,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
if ( rc )
goto fail1;
- rc = hvm_ioreq_server_map_pages(s, is_default);
+ rc = hvm_ioreq_server_map_pages(s, is_default, handle_bufioreq);
if ( rc )
goto fail2;
@@ -998,7 +1019,8 @@ static ioservid_t next_ioservid(struct domain *d)
}
static int hvm_create_ioreq_server(struct domain *d, domid_t domid,
- bool_t is_default, ioservid_t *id)
+ bool_t is_default, bool_t handle_bufioreq,
+ ioservid_t *id)
{
struct hvm_ioreq_server *s;
int rc;
@@ -1015,7 +1037,7 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid,
if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
goto fail2;
- rc = hvm_ioreq_server_init(s, d, domid, is_default,
+ rc = hvm_ioreq_server_init(s, d, domid, is_default, handle_bufioreq,
next_ioservid(d));
if ( rc )
goto fail3;
@@ -1108,8 +1130,11 @@ static int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
continue;
*ioreq_pfn = s->ioreq.gmfn;
- *bufioreq_pfn = s->bufioreq.gmfn;
- *bufioreq_port = s->bufioreq_evtchn;
+
+ if ( s->bufioreq.va != NULL ) {
+ *bufioreq_pfn = s->bufioreq.gmfn;
+ *bufioreq_port = s->bufioreq_evtchn;
+ }
rc = 0;
break;
@@ -2368,6 +2393,9 @@ int hvm_buffered_io_send(ioreq_t *p)
iorp = &s->bufioreq;
pg = iorp->va;
+ if ( !pg )
+ return 0;
+
/*
* Return 0 for the cases we can't deal with:
* - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
@@ -5124,7 +5152,8 @@ static int hvmop_create_ioreq_server(
if ( rc != 0 )
goto out;
- rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0, &op.id);
+ rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
+ !!op.handle_bufioreq, &op.id);
if ( rc != 0 )
goto out;
@@ -5548,7 +5577,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
/* May need to create server */
domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
- rc = hvm_create_ioreq_server(d, domid, 1, NULL);
+ rc = hvm_create_ioreq_server(d, domid, 1, 1, NULL);
if ( rc != 0 && rc != -EEXIST )
goto param_fail;
/*FALLTHRU*/
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 6cf1844..6d3e559 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -258,12 +258,15 @@ typedef uint16_t ioservid_t;
* HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
* emulator servicing domain <domid>.
*
- * The <id> handed back is unique for <domid>.
+ * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
+ * the buffered ioreq ring will not be allocated and hence all emulation
+ * requests to this server will be synchronous.
*/
#define HVMOP_create_ioreq_server 17
struct xen_hvm_create_ioreq_server {
- domid_t domid; /* IN - domain to be serviced */
- ioservid_t id; /* OUT - server id */
+ domid_t domid; /* IN - domain to be serviced */
+ uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
+ ioservid_t id; /* OUT - server id */
};
typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
@@ -273,12 +276,15 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
* IOREQ Server <id>.
*
* The emulator needs to map the synchronous ioreq structures and buffered
- * ioreq ring that Xen uses to request emulation. These are hosted in domain
- * <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn> respectively. In addition the
- * emulator needs to bind to event channel <bufioreq_port> to listen for
- * buffered emulation requests. (The event channels used for synchronous
- * emulation requests are specified in the per-CPU ioreq structures in
- * <ioreq_pfn>).
+ * ioreq ring (if it exists) that Xen uses to request emulation. These are
+ * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
+ * respectively. In addition, if the IOREQ Server is handling buffered
+ * emulation requests, the emulator needs to bind to event channel
+ * <bufioreq_port> to listen for them. (The event channels used for
+ * synchronous emulation requests are specified in the per-CPU ioreq
+ * structures in <ioreq_pfn>).
+ * If the IOREQ Server is not handling buffered emulation requests then the
+ * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
*/
#define HVMOP_get_ioreq_server_info 18
struct xen_hvm_get_ioreq_server_info {
--
1.7.10.4
next prev parent reply other threads:[~2014-05-09 8:40 UTC|newest]
Thread overview: 39+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-05-09 8:39 [PATCH v7 0/9] Support for running secondary emulators Paul Durrant
2014-05-09 8:39 ` [PATCH v7 1/9] ioreq-server: pre-series tidy up Paul Durrant
2014-05-09 8:39 ` [PATCH v7 2/9] ioreq-server: centralize access to ioreq structures Paul Durrant
2014-05-09 8:39 ` [PATCH v7 3/9] ioreq-server: create basic ioreq server abstraction Paul Durrant
2014-05-09 13:09 ` Jan Beulich
2014-05-09 15:22 ` Paul Durrant
2014-05-09 8:39 ` [PATCH v7 4/9] ioreq-server: on-demand creation of ioreq server Paul Durrant
2014-05-09 13:12 ` Jan Beulich
2014-05-09 15:22 ` Paul Durrant
2014-05-09 8:40 ` [PATCH v7 5/9] Add an implentation of asprintf() for xen Paul Durrant
2014-05-09 13:06 ` Jan Beulich
2014-05-09 13:08 ` Paul Durrant
2014-05-09 13:15 ` Jan Beulich
2014-05-09 13:19 ` Paul Durrant
2014-05-09 14:15 ` Paul Durrant
2014-05-09 15:47 ` Jan Beulich
2014-05-09 8:40 ` [PATCH v7 6/9] Add the facility to limit ranges per rangeset Paul Durrant
2014-05-09 13:22 ` Jan Beulich
2014-05-09 15:23 ` Paul Durrant
2014-05-09 8:40 ` [PATCH v7 7/9] ioreq-server: add support for multiple servers Paul Durrant
2014-05-09 9:34 ` Ian Campbell
2014-05-09 9:38 ` Ian Campbell
2014-05-09 9:50 ` Paul Durrant
2014-05-09 13:25 ` Ian Campbell
2014-05-09 13:29 ` Paul Durrant
2014-05-09 13:38 ` Jan Beulich
2014-05-09 13:40 ` Paul Durrant
2014-05-12 11:26 ` Jan Beulich
2014-05-12 12:19 ` Paul Durrant
2014-05-19 12:55 ` Paul Durrant
2014-05-19 13:12 ` Jan Beulich
2014-05-19 13:15 ` Paul Durrant
2014-05-09 8:40 ` [PATCH v7 8/9] ioreq-server: remove p2m entries when server is enabled Paul Durrant
2014-05-09 9:36 ` Ian Campbell
2014-05-12 11:34 ` Jan Beulich
2014-05-12 12:21 ` Paul Durrant
2014-05-09 8:40 ` Paul Durrant [this message]
2014-05-12 11:36 ` [PATCH v7 9/9] ioreq-server: make buffered ioreq handling optional Jan Beulich
2014-05-12 12:20 ` Paul Durrant
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1399624804-5109-10-git-send-email-paul.durrant@citrix.com \
--to=paul.durrant@citrix.com \
--cc=ian.jackson@eu.citrix.com \
--cc=jbeulich@suse.com \
--cc=stefano.stabellini@eu.citrix.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).