From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xen.org
Cc: Paul Durrant <paul.durrant@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	Ian Campbell <ian.campbell@citrix.com>,
	Jan Beulich <jbeulich@suse.com>,
	Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Subject: [PATCH v7 8/9] ioreq-server: remove p2m entries when server is enabled
Date: Fri, 9 May 2014 09:40:03 +0100
Message-ID: <1399624804-5109-9-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1399624804-5109-1-git-send-email-paul.durrant@citrix.com>

For secondary servers, add an HVM op to enable/disable the server. The
server will not accept I/O until it is enabled, and the act of enabling
the server removes its pages from the guest p2m, thus preventing the guest
from directly mapping the pages and synthesizing ioreqs.
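
As an illustration only (not part of the patch): a minimal sketch of how a
secondary emulator might bring a server online with the new call. The helper
name ioreq_server_attach() is made up for illustration; it assumes
xc_hvm_get_ioreq_server_info() keeps the signature introduced earlier in this
series, maps via the existing xc_map_foreign_pages(), and abbreviates error
handling.

  #include <sys/mman.h>
  #include <xenctrl.h>

  /* Sketch: map the synchronous ioreq page, then enable the server.
   * The mapping is taken first on the assumption that the gmfn is no
   * longer translatable once enabling has removed it from the guest
   * p2m. */
  static void *ioreq_server_attach(xc_interface *xch, domid_t domid,
                                   ioservid_t id)
  {
      xen_pfn_t ioreq_pfn, bufioreq_pfn;
      evtchn_port_t bufioreq_port;
      void *iopage;

      if ( xc_hvm_get_ioreq_server_info(xch, domid, id, &ioreq_pfn,
                                        &bufioreq_pfn, &bufioreq_port) < 0 )
          return NULL;

      iopage = xc_map_foreign_pages(xch, domid, PROT_READ | PROT_WRITE,
                                    &ioreq_pfn, 1);
      if ( iopage == NULL )
          return NULL;

      /* Enabling removes the pages from the guest p2m and allows the
       * server to start receiving emulation requests. */
      if ( xc_hvm_set_ioreq_server_state(xch, domid, id, 1) < 0 )
      {
          munmap(iopage, XC_PAGE_SIZE);
          return NULL;
      }

      return iopage;
  }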

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
---
 tools/libxc/xc_domain.c          |   27 +++++++
 tools/libxc/xenctrl.h            |   18 +++++
 xen/arch/x86/hvm/hvm.c           |  152 +++++++++++++++++++++++++++++++++++++-
 xen/include/asm-x86/hvm/domain.h |    1 +
 xen/include/public/hvm/hvm_op.h  |   19 +++++
 5 files changed, 215 insertions(+), 2 deletions(-)
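
For completeness, the matching teardown as a sketch (same includes and
caveats as above; ioreq_server_detach() is a made-up helper name): disabling
the server hands the ioreq pages back to the guest p2m (see
hvm_add_ioreq_gmfn() in the hvm.c hunk below) before the mapping is dropped
and the server is destroyed with xc_hvm_destroy_ioreq_server() from earlier
in the series.

  /* Sketch: quiesce and tear down a secondary server. */
  static void ioreq_server_detach(xc_interface *xch, domid_t domid,
                                  ioservid_t id, void *iopage)
  {
      /* Stop emulation requests; the ioreq pages reappear in the guest p2m. */
      xc_hvm_set_ioreq_server_state(xch, domid, id, 0);

      if ( iopage )
          munmap(iopage, XC_PAGE_SIZE);

      xc_hvm_destroy_ioreq_server(xch, domid, id);
  }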

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index a8c9f81..17a8417 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1508,6 +1508,33 @@ int xc_hvm_destroy_ioreq_server(xc_interface *xch,
     return rc;
 }
 
+int xc_hvm_set_ioreq_server_state(xc_interface *xch,
+                                  domid_t domid,
+                                  ioservid_t id,
+                                  int enabled)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_set_ioreq_server_state_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    hypercall.op     = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_set_ioreq_server_state;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+
+    arg->domid = domid;
+    arg->id = id;
+    arg->enabled = !!enabled;
+
+    rc = do_xen_hypercall(xch, &hypercall);
+
+    xc_hypercall_buffer_free(xch, arg);
+    return rc;
+}
+
 int xc_domain_setdebugging(xc_interface *xch,
                            uint32_t domid,
                            unsigned int enable)
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 3260a56..2045084 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1829,6 +1829,24 @@ int xc_hvm_get_ioreq_server_info(xc_interface *xch,
                                  evtchn_port_t *bufioreq_port);
 
 /**
+ * This function sets the state of an IOREQ Server. An IOREQ Server
+ * will not be passed any emulation requests until it is in
+ * the enabled state.
+ * Note that the contents of the ioreq_pfn and bufioreq_pfn are
+ * not meaningful until the IOREQ Server is in the enabled state.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id to be serviced
+ * @parm id the IOREQ Server id.
+ * @parm enabled the requested state (1 to enable, 0 to disable).
+ * @return 0 on success, -1 on failure.
+ */
+int xc_hvm_set_ioreq_server_state(xc_interface *xch,
+                                  domid_t domid,
+                                  ioservid_t id,
+                                  int enabled);
+
+/**
  * This function registers a range of memory or I/O ports for emulation.
  *
  * @parm xch a handle to an open hypervisor interface.
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 58cc478..86daecd 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -554,6 +554,22 @@ static int hvm_map_ioreq_page(
     return 0;
 }
 
+static void hvm_remove_ioreq_gmfn(
+    struct domain *d, struct hvm_ioreq_page *iorp)
+{
+    guest_physmap_remove_page(d, iorp->gmfn,
+                              page_to_mfn(iorp->page), 0);
+    clear_page(iorp->va);
+}
+
+static int hvm_add_ioreq_gmfn(
+    struct domain *d, struct hvm_ioreq_page *iorp)
+{
+    clear_page(iorp->va);
+    return guest_physmap_add_page(d, iorp->gmfn,
+                                  page_to_mfn(iorp->page), 0);
+}
+
 static int hvm_print_line(
     int dir, uint32_t port, uint32_t bytes, uint32_t *val)
 {
@@ -670,7 +686,8 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
 
     list_add(&sv->list_entry, &s->ioreq_vcpu_list);
 
-    hvm_update_ioreq_evtchn(s, sv);
+    if ( s->enabled )
+        hvm_update_ioreq_evtchn(s, sv);
 
     spin_unlock(&s->lock);
     return 0;
@@ -853,6 +870,56 @@ static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
         rangeset_destroy(s->range[i]);
 }
 
+static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
+                                    bool_t is_default)
+{
+    struct domain *d = s->domain;
+    struct hvm_ioreq_vcpu *sv;
+
+    spin_lock(&s->lock);
+
+    if ( s->enabled )
+        goto done;
+
+    if ( !is_default )
+    {
+        hvm_remove_ioreq_gmfn(d, &s->ioreq);
+        hvm_remove_ioreq_gmfn(d, &s->bufioreq);
+    }
+
+    s->enabled = 1;
+
+    list_for_each_entry ( sv,
+                          &s->ioreq_vcpu_list,
+                          list_entry )
+        hvm_update_ioreq_evtchn(s, sv);
+
+  done:
+    spin_unlock(&s->lock);
+}
+
+static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
+                                     bool_t is_default)
+{
+    struct domain *d = s->domain;
+
+    spin_lock(&s->lock);
+
+    if ( !s->enabled )
+        goto done;
+
+    if ( !is_default )
+    {
+        hvm_add_ioreq_gmfn(d, &s->bufioreq);
+        hvm_add_ioreq_gmfn(d, &s->ioreq);
+    }
+
+    s->enabled = 0;
+
+ done:
+    spin_unlock(&s->lock);
+}
+
 static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
                                  domid_t domid, bool_t is_default,
                                  ioservid_t id)
@@ -958,7 +1025,10 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid,
     d->arch.hvm_domain.ioreq_server.count++;
 
     if ( is_default )
+    {
         d->arch.hvm_domain.default_ioreq_server = s;
+        hvm_ioreq_server_enable(s, 1);
+    }
 
     if (id != NULL)
         *id = s->id;
@@ -1144,6 +1214,45 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     return rc;
 }
 
+static int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
+                                      bool_t enabled)
+{
+    struct list_head *entry;
+    int rc;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+
+    rc = -ENOENT;
+    list_for_each ( entry,
+                    &d->arch.hvm_domain.ioreq_server.list )
+    {
+        struct hvm_ioreq_server *s = list_entry(entry,
+                                                struct hvm_ioreq_server,
+                                                list_entry);
+
+        if ( s == d->arch.hvm_domain.default_ioreq_server )
+            continue;
+
+        if ( s->id != id )
+            continue;
+
+        domain_pause(d);
+
+        if ( enabled )
+            hvm_ioreq_server_enable(s, 0);
+        else
+            hvm_ioreq_server_disable(s, 0);
+
+        domain_unpause(d);
+
+        rc = 0;
+        break;
+    }
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    return rc;
+}
+
 static int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
 {
     struct hvm_ioreq_server *s;
@@ -1206,8 +1315,10 @@ static void hvm_destroy_all_ioreq_servers(struct domain *d)
 
         domain_pause(d);
 
-        if ( is_default )
+        if ( is_default ) {
+            hvm_ioreq_server_disable(s, 1);
             d->arch.hvm_domain.default_ioreq_server = NULL;
+        }
 
         --d->arch.hvm_domain.ioreq_server.count;
         list_del(&s->list_entry);
@@ -2200,6 +2311,9 @@ static struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
+        if ( !s->enabled )
+            continue;
+
         BUILD_BUG_ON(IOREQ_TYPE_PIO != HVMOP_IO_RANGE_PORT);
         BUILD_BUG_ON(IOREQ_TYPE_COPY != HVMOP_IO_RANGE_MEMORY);
         BUILD_BUG_ON(IOREQ_TYPE_PCI_CONFIG != HVMOP_IO_RANGE_PCI);
@@ -5117,6 +5231,35 @@ static int hvmop_unmap_io_range_from_ioreq_server(
     return rc;
 }
 
+static int hvmop_set_ioreq_server_state(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_ioreq_server_state_t) uop)
+{
+    xen_hvm_set_ioreq_server_state_t op;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
+    if ( rc != 0 )
+        return rc;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d);
+    if ( rc != 0 )
+        goto out;
+
+    rc = hvm_set_ioreq_server_state(d, op.id, !!op.enabled);
+
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
 static int hvmop_destroy_ioreq_server(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_destroy_ioreq_server_t) uop)
 {
@@ -5176,6 +5319,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
         rc = hvmop_unmap_io_range_from_ioreq_server(
             guest_handle_cast(arg, xen_hvm_io_range_t));
         break;
+
+    case HVMOP_set_ioreq_server_state:
+        rc = hvmop_set_ioreq_server_state(
+            guest_handle_cast(arg, xen_hvm_set_ioreq_server_state_t));
+        break;
     
     case HVMOP_destroy_ioreq_server:
         rc = hvmop_destroy_ioreq_server(
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index a3e2727..9caeb41 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -69,6 +69,7 @@ struct hvm_ioreq_server {
     spinlock_t             bufioreq_lock;
     evtchn_port_t          bufioreq_evtchn;
     struct rangeset        *range[NR_IO_RANGE_TYPES];
+    bool_t                 enabled;
 };
 
 struct hvm_domain {
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index a335235..6cf1844 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -339,6 +339,25 @@ struct xen_hvm_destroy_ioreq_server {
 typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
 
+/*
+ * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
+ *                               servicing domain <domid>.
+ *
+ * The IOREQ Server will not be passed any emulation requests until it is
+ * in the enabled state.
+ * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
+ * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
+ * is in the enabled state.
+ */
+#define HVMOP_set_ioreq_server_state 22
+struct xen_hvm_set_ioreq_server_state {
+    domid_t domid;   /* IN - domain to be serviced */
+    ioservid_t id;   /* IN - server id */
+    uint8_t enabled; /* IN - enabled? */
+};
+typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
+
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
-- 
1.7.10.4
