xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xen.org
Cc: Paul Durrant <paul.durrant@citrix.com>,
	Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v4 4/8] ioreq-server: on-demand creation of ioreq server
Date: Wed, 2 Apr 2014 16:11:32 +0100	[thread overview]
Message-ID: <1396451496-5716-5-git-send-email-paul.durrant@citrix.com> (raw)
In-Reply-To: <1396451496-5716-1-git-send-email-paul.durrant@citrix.com>

This patch defers creation of the ioreq server until the legacy HVM parameters
are first read (by an emulator).

A lock is introduced to protect access to the ioreq server should multiple
emulator/tool invocations attempt to access it concurrently. The guest itself
is protected by the fact that the ioreq server is only ever created while the
domain is paused.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/hvm/hvm.c           |  262 +++++++++++++++++++++++++++++++-------
 xen/include/asm-x86/hvm/domain.h |    1 +
 2 files changed, 215 insertions(+), 48 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5f131c4..4ecbede 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -383,40 +383,38 @@ void hvm_do_resume(struct vcpu *v)
 {
     struct domain *d = v->domain;
     struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
-    ioreq_t *p;
 
     check_wakeup_from_wait();
 
     if ( is_hvm_vcpu(v) )
         pt_restore_timer(v);
 
-    if ( !s )
-        goto check_inject_trap;
-
-    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    p = get_ioreq(s, v);
-    while ( p->state != STATE_IOREQ_NONE )
+    if ( s )
     {
-        switch ( p->state )
+        ioreq_t *p = get_ioreq(s, v);
+
+        while ( p->state != STATE_IOREQ_NONE )
         {
-        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
-            rmb(); /* see IORESP_READY /then/ read contents of ioreq */
-            hvm_io_assist(p);
-            break;
-        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
-        case STATE_IOREQ_INPROCESS:
-            wait_on_xen_event_channel(p->vp_eport,
-                                      (p->state != STATE_IOREQ_READY) &&
-                                      (p->state != STATE_IOREQ_INPROCESS));
-            break;
-        default:
-            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
-            domain_crash(v->domain);
-            return; /* bail */
+            switch ( p->state )
+            {
+            case STATE_IORESP_READY: /* IORESP_READY -> NONE */
+                rmb(); /* see IORESP_READY /then/ read contents of ioreq */
+                hvm_io_assist(p);
+                break;
+            case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
+            case STATE_IOREQ_INPROCESS:
+                wait_on_xen_event_channel(p->vp_eport,
+                                          (p->state != STATE_IOREQ_READY) &&
+                                          (p->state != STATE_IOREQ_INPROCESS));
+                break;
+            default:
+                gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
+                domain_crash(d);
+                return; /* bail */
+            }
         }
     }
 
- check_inject_trap:
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 ) 
     {
@@ -645,13 +643,68 @@ static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s,
     spin_unlock(&s->lock);
 }
 
-static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
+static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
 {
-    struct hvm_ioreq_server *s;
+    struct list_head *entry, *next;
 
-    s = xzalloc(struct hvm_ioreq_server);
-    if ( !s )
-        return -ENOMEM;
+    spin_lock(&s->lock);
+
+    list_for_each_safe ( entry, next, &s->ioreq_vcpu_list )
+    {
+        struct hvm_ioreq_vcpu *sv = container_of(entry, 
+                                                 struct hvm_ioreq_vcpu, 
+                                                 list_entry);
+        struct vcpu *v = sv->vcpu;
+
+        list_del_init(&sv->list_entry);
+
+        if ( v->vcpu_id == 0 )
+            free_xen_event_channel(v, s->bufioreq_evtchn);
+
+        free_xen_event_channel(v, sv->ioreq_evtchn);
+
+        xfree(sv);
+    }
+
+    spin_unlock(&s->lock);
+}
+
+static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s)
+{
+    struct domain *d = s->domain;
+    unsigned long pfn;
+    int rc;
+
+    pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+    rc = hvm_map_ioreq_page(d, &s->ioreq, pfn);
+    if ( rc )
+        goto fail1;
+
+    pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
+    rc = hvm_map_ioreq_page(d, &s->bufioreq, pfn);
+    if ( rc )
+        goto fail2;
+
+    return 0;
+
+fail2:
+    hvm_unmap_ioreq_page(&s->ioreq);
+
+fail1:
+    return rc;
+}
+
+static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
+{
+    hvm_unmap_ioreq_page(&s->bufioreq);
+    hvm_unmap_ioreq_page(&s->ioreq);
+}
+
+static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
+                                 domid_t domid)
+{
+    struct vcpu *v;
+    int rc;
 
     s->domain = d;
     s->domid = domid;
@@ -660,29 +713,112 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
     spin_lock_init(&s->bufioreq_lock);
 
+    rc = hvm_ioreq_server_map_pages(s);
+    if ( rc )
+        return rc;
+
+    for_each_vcpu ( d, v )
+    {
+        rc = hvm_ioreq_server_add_vcpu(s, v);
+        if ( rc )
+            goto fail;
+    }
+
+    return 0;
+
+ fail:
+    hvm_ioreq_server_remove_all_vcpus(s);
+    hvm_ioreq_server_unmap_pages(s);
+
+    return rc;
+}
+
+static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
+{
+    hvm_ioreq_server_remove_all_vcpus(s);
+    hvm_ioreq_server_unmap_pages(s);
+}
+
+static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
+{
+    struct hvm_ioreq_server *s;
+    int rc;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    rc = -EEXIST;
+    if ( d->arch.hvm_domain.ioreq_server != NULL )
+        goto fail1;
+ 
+    rc = -ENOMEM;
+    s = xzalloc(struct hvm_ioreq_server);
+    if ( !s )
+        goto fail2;
+
+    domain_pause(d);
+
+    rc = hvm_ioreq_server_init(s, d, domid);
+    if ( rc )
+        goto fail3;
+
     d->arch.hvm_domain.ioreq_server = s;
+
+    domain_unpause(d);
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
     return 0;
+
+ fail3:
+    domain_unpause(d);
+
+    xfree(s);
+
+ fail2:
+ fail1:
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+    return rc;
 }
 
 static void hvm_destroy_ioreq_server(struct domain *d)
 {
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    struct hvm_ioreq_server *s;
 
-    hvm_unmap_ioreq_page(&s->bufioreq);
-    hvm_unmap_ioreq_page(&s->ioreq);
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    s = d->arch.hvm_domain.ioreq_server;
+    if ( !s )
+        goto done;
+
+    d->arch.hvm_domain.ioreq_server = NULL;
+
+    domain_pause(d);
+
+    hvm_ioreq_server_deinit(s);
+
+    domain_unpause(d);
 
     xfree(s);
+
+ done:
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
 }
 
 static int hvm_set_ioreq_pfn(struct domain *d, bool_t buf,
                              unsigned long pfn)
 {
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
-    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+    struct hvm_ioreq_server *s;
+    struct hvm_ioreq_page *iorp;
     int rc;
 
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    s = d->arch.hvm_domain.ioreq_server;
+    if ( !s )
+        goto done;
+
     spin_lock(&s->lock);
 
+    iorp = buf ? &s->bufioreq : &s->ioreq;
     rc = hvm_map_ioreq_page(d, iorp, pfn);
     if ( rc )
         goto fail;
@@ -701,10 +837,14 @@ static int hvm_set_ioreq_pfn(struct domain *d, bool_t buf,
     }
 
     spin_unlock(&s->lock);
+
+ done:
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
     return 0;
 
  fail:
     spin_unlock(&s->lock);
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
     return rc;
 }
 
@@ -725,9 +865,15 @@ static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
 
 static int hvm_set_dm_domain(struct domain *d, domid_t domid)
 {
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    struct hvm_ioreq_server *s;
     int rc = 0;
 
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    s = d->arch.hvm_domain.ioreq_server;
+    if ( !s )
+        goto done;
+
     spin_lock(&s->lock);
     domain_pause(d);
 
@@ -764,12 +910,13 @@ static int hvm_set_dm_domain(struct domain *d, domid_t domid)
     domain_unpause(d);
     spin_unlock(&s->lock);
 
+ done:
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
     return rc;
 }
 
 int hvm_domain_initialise(struct domain *d)
 {
-    domid_t domid;
     int rc;
 
     if ( !hvm_enabled )
@@ -795,6 +942,7 @@ int hvm_domain_initialise(struct domain *d)
 
     }
 
+    spin_lock_init(&d->arch.hvm_domain.ioreq_server_lock);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
     spin_lock_init(&d->arch.hvm_domain.uc_lock);
 
@@ -835,21 +983,14 @@ int hvm_domain_initialise(struct domain *d)
 
     rtc_init(d);
 
-    domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
-    rc = hvm_create_ioreq_server(d, domid);
-    if ( rc != 0 )
-        goto fail2;
-
     register_portio_handler(d, 0xe9, 1, hvm_print_line);
 
     rc = hvm_funcs.domain_initialise(d);
     if ( rc != 0 )
-        goto fail3;
+        goto fail2;
 
     return 0;
 
- fail3:
-    hvm_destroy_ioreq_server(d);
  fail2:
     rtc_deinit(d);
     stdvga_deinit(d);
@@ -1506,7 +1647,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
     struct domain *d = v->domain;
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    struct hvm_ioreq_server *s;
 
     hvm_asid_flush_vcpu(v);
 
@@ -1549,7 +1690,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
          && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
         goto fail5;
 
-    rc = hvm_ioreq_server_add_vcpu(s, v);
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    s = d->arch.hvm_domain.ioreq_server;
+    if ( s )
+        rc = hvm_ioreq_server_add_vcpu(s, v);
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+
     if ( rc != 0 )
         goto fail6;
 
@@ -1586,9 +1734,15 @@ int hvm_vcpu_initialise(struct vcpu *v)
 void hvm_vcpu_destroy(struct vcpu *v)
 {
     struct domain *d = v->domain;
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    struct hvm_ioreq_server *s;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    s = d->arch.hvm_domain.ioreq_server;
+    if ( s )
+        hvm_ioreq_server_remove_vcpu(s, v);
 
-    hvm_ioreq_server_remove_vcpu(s, v);
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
 
     nestedhvm_vcpu_destroy(v);
 
@@ -4464,7 +4618,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
                 domctl_lock_release();
                 break;
             case HVM_PARAM_DM_DOMAIN:
-                /* Not reflexive, as we must domain_pause(). */
+                /* Not reflexive, as we may need to domain_pause(). */
                 rc = -EPERM;
                 if ( curr_d == d )
                     break;
@@ -4570,6 +4724,18 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             case HVM_PARAM_ACPI_S_STATE:
                 a.value = d->arch.hvm_domain.is_s3_suspended ? 3 : 0;
                 break;
+            case HVM_PARAM_IOREQ_PFN:
+            case HVM_PARAM_BUFIOREQ_PFN:
+            case HVM_PARAM_BUFIOREQ_EVTCHN: {
+                domid_t domid;
+                
+                /* May need to create server */
+                domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
+                rc = hvm_create_ioreq_server(d, domid);
+                if ( rc != 0 && rc != -EEXIST )
+                    goto param_fail;
+                /*FALLTHRU*/
+            }
             default:
                 a.value = d->arch.hvm_domain.params[a.index];
                 break;
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 1f6eaec..b6911f9 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -63,6 +63,7 @@ struct hvm_ioreq_server {
 };
 
 struct hvm_domain {
+    spinlock_t              ioreq_server_lock;
     struct hvm_ioreq_server *ioreq_server;
 
     struct pl_time         pl_time;
-- 
1.7.10.4

  parent reply	other threads:[~2014-04-02 15:11 UTC|newest]

Thread overview: 62+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-04-02 15:11 [PATCH v4 0/8] Support for running secondary emulators Paul Durrant
2014-04-02 15:11 ` [PATCH v4 1/8] ioreq-server: pre-series tidy up Paul Durrant
2014-04-07 10:48   ` Jan Beulich
2014-04-08  9:13     ` Paul Durrant
2014-04-02 15:11 ` [PATCH v4 2/8] ioreq-server: centralize access to ioreq structures Paul Durrant
2014-04-03 11:22   ` George Dunlap
2014-04-07 11:10   ` Jan Beulich
2014-04-08  9:18     ` Paul Durrant
2014-04-02 15:11 ` [PATCH v4 3/8] ioreq-server: create basic ioreq server abstraction Paul Durrant
2014-04-03 14:49   ` George Dunlap
2014-04-03 15:43     ` Paul Durrant
2014-04-03 15:48       ` George Dunlap
2014-04-03 15:54         ` Paul Durrant
2014-04-07 11:36   ` Jan Beulich
2014-04-08  9:32     ` Paul Durrant
2014-04-08  9:47       ` Jan Beulich
2014-04-08 10:06         ` Paul Durrant
2014-04-02 15:11 ` Paul Durrant [this message]
2014-04-07 11:50   ` [PATCH v4 4/8] ioreq-server: on-demand creation of ioreq server Jan Beulich
2014-04-08  9:35     ` Paul Durrant
2014-04-08  9:51       ` Jan Beulich
2014-04-08 10:11         ` Paul Durrant
2014-04-02 15:11 ` [PATCH v4 5/8] ioreq-server: add support for multiple servers Paul Durrant
2014-04-03 15:32   ` George Dunlap
2014-04-03 15:39     ` Paul Durrant
2014-04-03 15:43       ` George Dunlap
2014-04-03 15:46         ` Paul Durrant
2014-04-07 15:57   ` Ian Campbell
2014-04-08  8:32     ` Paul Durrant
2014-04-08  8:40       ` Ian Campbell
2014-04-08  8:45         ` Paul Durrant
2014-04-09 12:43   ` Jan Beulich
2014-04-09 12:49     ` Ian Campbell
2014-04-09 13:15       ` Jan Beulich
2014-04-09 13:32     ` Paul Durrant
2014-04-09 13:46       ` Jan Beulich
2014-04-09 13:51         ` Paul Durrant
2014-04-09 14:42         ` Ian Campbell
2014-04-02 15:11 ` [PATCH v4 6/8] ioreq-server: remove p2m entries when server is enabled Paul Durrant
2014-04-07 16:00   ` Ian Campbell
2014-04-08  8:33     ` Paul Durrant
2014-04-09 12:20   ` Jan Beulich
2014-04-09 13:36     ` Paul Durrant
2014-04-09 13:50       ` Jan Beulich
2014-04-02 15:11 ` [PATCH v4 7/8] ioreq-server: make buffered ioreq handling optional Paul Durrant
2014-04-07 16:06   ` Ian Campbell
2014-04-08  8:35     ` Paul Durrant
2014-04-02 15:11 ` [PATCH v4 8/8] ioreq-server: bring the PCI hotplug controller implementation into Xen Paul Durrant
2014-04-07 16:14   ` Ian Campbell
2014-04-08  8:25     ` Paul Durrant
2014-04-08  8:45       ` Ian Campbell
2014-04-08  8:49         ` Paul Durrant
2014-04-08  8:57           ` Ian Campbell
2014-04-08  9:00             ` Paul Durrant
2014-04-09 13:34   ` Jan Beulich
2014-04-09 13:42     ` Paul Durrant
2014-04-09 13:53       ` Jan Beulich
2014-04-09 14:25         ` Paul Durrant
2014-04-09 14:47           ` Jan Beulich
2014-04-09 14:59         ` Ian Jackson
2014-04-09 15:06           ` Jan Beulich
2014-04-10 16:04           ` George Dunlap

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1396451496-5716-5-git-send-email-paul.durrant@citrix.com \
    --to=paul.durrant@citrix.com \
    --cc=jbeulich@suse.com \
    --cc=keir@xen.org \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).