From: Julien Grall <julien.grall@arm.com>
To: xen-devel@lists.xen.org
Cc: Julien Grall <julien.grall@arm.com>,
	sstabellini@kernel.org, shankerd@codeaurora.org,
	wei.chen@linaro.org
Subject: [PATCH] xen/arm: io: Protect the handlers with a read-write lock
Date: Tue, 28 Jun 2016 17:34:31 +0100
Message-ID: <1467131671-24612-1-git-send-email-julien.grall@arm.com>

Currently, accessing the I/O handlers does not require taking a lock
because new handlers are always added at the end of the array. In a
follow-up patch, this array will be sorted to optimize the lookup.
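
The sort itself is left to that follow-up patch; purely to illustrate why a
sorted array helps, a lookup over sorted, non-overlapping handler ranges can
become a binary search along these lines (a standalone sketch with
hypothetical names, not code from the Xen tree):

struct handler_range {
    unsigned long addr;
    unsigned long size;
};

/*
 * Return the entry covering @gpa, or NULL. Assumes @h is sorted by addr
 * and that the ranges do not overlap.
 */
const struct handler_range *lookup_sorted(const struct handler_range *h,
                                          unsigned int n, unsigned long gpa)
{
    unsigned int low = 0, high = n;

    while ( low < high )
    {
        unsigned int mid = low + (high - low) / 2;

        if ( gpa < h[mid].addr )
            high = mid;                     /* target is below this range */
        else if ( gpa >= (h[mid].addr + h[mid].size) )
            low = mid + 1;                  /* target is above this range */
        else
            return &h[mid];                 /* gpa falls inside this range */
    }

    return NULL;
}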

Given that most of the time the I/O handlers will not be modified,
using a spinlock would add contention when multiple vCPUs are accessing
the emulated MMIOs. So use a read-write lock to protect the handlers.
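
To make the intent concrete, here is a standalone userspace analogue of the
pattern, using POSIX rwlocks in place of Xen's rwlock_t; all names in it
(mmio_range, add_range, lookup_range) are hypothetical and only model the
read-mostly scheme, they are not part of this patch:

#include <pthread.h>
#include <stdio.h>

#define MAX_RANGES 16

struct mmio_range {
    unsigned long base;
    unsigned long size;
};

static struct mmio_range ranges[MAX_RANGES];
static unsigned int nr_ranges;
static pthread_rwlock_t ranges_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Writer: rare, e.g. while the domain is being constructed. */
static void add_range(unsigned long base, unsigned long size)
{
    pthread_rwlock_wrlock(&ranges_lock);
    ranges[nr_ranges].base = base;
    ranges[nr_ranges].size = size;
    nr_ranges++;               /* published to readers when the lock drops */
    pthread_rwlock_unlock(&ranges_lock);
}

/* Readers: the hot path; many threads (vCPUs) hold the lock concurrently. */
static const struct mmio_range *lookup_range(unsigned long addr)
{
    const struct mmio_range *found = NULL;
    unsigned int i;

    pthread_rwlock_rdlock(&ranges_lock);
    for ( i = 0; i < nr_ranges; i++ )
    {
        if ( (addr >= ranges[i].base) &&
             (addr < (ranges[i].base + ranges[i].size)) )
        {
            found = &ranges[i];
            break;
        }
    }
    pthread_rwlock_unlock(&ranges_lock);

    return found;
}

int main(void)
{
    add_range(0x1000, 0x100);
    printf("0x1040 -> %s\n", lookup_range(0x1040) ? "hit" : "miss");
    printf("0x2000 -> %s\n", lookup_range(0x2000) ? "hit" : "miss");
    return 0;
}

Returning the pointer after dropping the read lock is only safe because, as
in Xen, handlers are appended and never removed or modified. Taking the lock
on the read side is also why the dsb(ish) barrier can go away below: the
lock's acquire/release ordering already makes the handler fields visible
before the updated num_entries.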

Finally, take the opportunity to re-indent domain_io_init correctly.

Signed-off-by: Julien Grall <julien.grall@arm.com>
---
 xen/arch/arm/io.c          | 47 +++++++++++++++++++++++++++-------------------
 xen/include/asm-arm/mmio.h |  3 ++-
 2 files changed, 30 insertions(+), 20 deletions(-)

diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index 0156755..5a96836 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -70,23 +70,39 @@ static int handle_write(const struct mmio_handler *handler, struct vcpu *v,
                                handler->priv);
 }
 
-int handle_mmio(mmio_info_t *info)
+static const struct mmio_handler *find_mmio_handler(struct domain *d,
+                                                    paddr_t gpa)
 {
-    struct vcpu *v = current;
-    int i;
-    const struct mmio_handler *handler = NULL;
-    const struct vmmio *vmmio = &v->domain->arch.vmmio;
+    const struct mmio_handler *handler;
+    unsigned int i;
+    struct vmmio *vmmio = &d->arch.vmmio;
+
+    read_lock(&vmmio->lock);
 
     for ( i = 0; i < vmmio->num_entries; i++ )
     {
         handler = &vmmio->handlers[i];
 
-        if ( (info->gpa >= handler->addr) &&
-             (info->gpa < (handler->addr + handler->size)) )
+        if ( (gpa >= handler->addr) &&
+             (gpa < (handler->addr + handler->size)) )
             break;
     }
 
     if ( i == vmmio->num_entries )
+        handler = NULL;
+
+    read_unlock(&vmmio->lock);
+
+    return handler;
+}
+
+int handle_mmio(mmio_info_t *info)
+{
+    struct vcpu *v = current;
+    const struct mmio_handler *handler = NULL;
+
+    handler = find_mmio_handler(v->domain, info->gpa);
+    if ( !handler )
         return 0;
 
     if ( info->dabt.write )
@@ -104,7 +120,7 @@ void register_mmio_handler(struct domain *d,
 
     BUG_ON(vmmio->num_entries >= MAX_IO_HANDLER);
 
-    spin_lock(&vmmio->lock);
+    write_lock(&vmmio->lock);
 
     handler = &vmmio->handlers[vmmio->num_entries];
 
@@ -113,24 +129,17 @@ void register_mmio_handler(struct domain *d,
     handler->size = size;
     handler->priv = priv;
 
-    /*
-     * handle_mmio is not using the lock to avoid contention.
-     * Make sure the other processors see the new handler before
-     * updating the number of entries
-     */
-    dsb(ish);
-
     vmmio->num_entries++;
 
-    spin_unlock(&vmmio->lock);
+    write_unlock(&vmmio->lock);
 }
 
 int domain_io_init(struct domain *d)
 {
-   spin_lock_init(&d->arch.vmmio.lock);
-   d->arch.vmmio.num_entries = 0;
+    rwlock_init(&d->arch.vmmio.lock);
+    d->arch.vmmio.num_entries = 0;
 
-   return 0;
+    return 0;
 }
 
 /*
diff --git a/xen/include/asm-arm/mmio.h b/xen/include/asm-arm/mmio.h
index da1cc2e..32f10f2 100644
--- a/xen/include/asm-arm/mmio.h
+++ b/xen/include/asm-arm/mmio.h
@@ -20,6 +20,7 @@
 #define __ASM_ARM_MMIO_H__
 
 #include <xen/lib.h>
+#include <xen/rwlock.h>
 #include <asm/processor.h>
 #include <asm/regs.h>
 
@@ -51,7 +52,7 @@ struct mmio_handler {
 
 struct vmmio {
     int num_entries;
-    spinlock_t lock;
+    rwlock_t lock;
     struct mmio_handler handlers[MAX_IO_HANDLER];
 };
 
-- 
1.9.1

