From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: stefano.stabellini@eu.citrix.com
Subject: [Qemu-devel] [RFH PATCH 2/2] xen: add a lock for the mapcache
Date: Wed, 14 Jan 2015 11:20:56 +0100
Message-ID: <1421230856-22736-3-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1421230856-22736-1-git-send-email-pbonzini@redhat.com>
Turn the existing dummy mapcache_lock/unlock macros into a real QemuMutex-based
lock and extend its coverage to all of xen-mapcache.c's public entry points.
This prepares for unlocked memory access, once parts of exec.c are no longer
protected by the BQL.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
xen-mapcache.c | 54 +++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 43 insertions(+), 11 deletions(-)
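For context, the conversion follows the usual locked-wrapper pattern: the
public entry point takes the cache-wide mutex and delegates to an _unlocked
helper, so callers that already hold the lock can use the helper directly.
Below is a minimal standalone sketch of that pattern only, using a plain
pthread mutex instead of QemuMutex and made-up Cache/cache_get names rather
than the real mapcache API:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Cache {
    pthread_mutex_t lock;
    uint8_t *data;                /* stands in for the mapped buckets */
} Cache;

static Cache cache = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Internal variant: must be called with cache.lock already held. */
static uint8_t *cache_get_unlocked(uint64_t addr)
{
    /* the real lookup/remap logic would live here */
    return cache.data + addr;
}

/* Public entry point: wraps the unlocked helper in the mutex. */
static uint8_t *cache_get(uint64_t addr)
{
    uint8_t *p;

    pthread_mutex_lock(&cache.lock);
    p = cache_get_unlocked(addr);
    pthread_mutex_unlock(&cache.lock);
    return p;
}

int main(void)
{
    static uint8_t backing[4096];

    cache.data = backing;
    printf("mapped at %p\n", (void *)cache_get(16));
    return 0;
}

The patch applies the same split to xen_map_cache and
xen_invalidate_map_cache_entry, while xen_ram_addr_from_mapcache simply
brackets its body with mapcache_lock/unlock.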
diff --git a/xen-mapcache.c b/xen-mapcache.c
index 458069b..8cefd0c 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -49,9 +49,6 @@
  */
 #define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024)
 
-#define mapcache_lock() ((void)0)
-#define mapcache_unlock() ((void)0)
-
 typedef struct MapCacheEntry {
     hwaddr paddr_index;
     uint8_t *vaddr_base;
@@ -79,11 +76,22 @@ typedef struct MapCache {
     unsigned int mcache_bucket_shift;
 
     phys_offset_to_gaddr_t phys_offset_to_gaddr;
+    QemuMutex lock;
     void *opaque;
 } MapCache;
 
 static MapCache *mapcache;
 
+static inline void mapcache_lock(void)
+{
+    qemu_mutex_lock(&mapcache->lock);
+}
+
+static inline void mapcache_unlock(void)
+{
+    qemu_mutex_unlock(&mapcache->lock);
+}
+
 static inline int test_bits(int nr, int size, const unsigned long *addr)
 {
     unsigned long res = find_next_zero_bit(addr, size + nr, nr);
@@ -102,6 +110,7 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
     mapcache->phys_offset_to_gaddr = f;
     mapcache->opaque = opaque;
+    qemu_mutex_init(&mapcache->lock);
 
     QTAILQ_INIT(&mapcache->locked_entries);
@@ -193,8 +202,8 @@ static void xen_remap_bucket(MapCacheEntry *entry,
     g_free(err);
 }
 
-uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
-                       uint8_t lock)
+static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
+                                       uint8_t lock)
 {
     MapCacheEntry *entry, *pentry = NULL;
     hwaddr address_index;
@@ -291,14 +300,27 @@ tryagain:
     return mapcache->last_entry->vaddr_base + address_offset;
 }
 
+uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
+                       uint8_t lock)
+{
+    uint8_t *p;
+
+    mapcache_lock();
+    p = xen_map_cache_unlocked(phys_addr, size, lock);
+    mapcache_unlock();
+    return p;
+}
+
 ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
 {
     MapCacheEntry *entry = NULL;
     MapCacheRev *reventry;
     hwaddr paddr_index;
     hwaddr size;
+    ram_addr_t raddr;
     int found = 0;
 
+    mapcache_lock();
     QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
         if (reventry->vaddr_req == ptr) {
             paddr_index = reventry->paddr_index;
@@ -323,13 +345,16 @@ ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
     }
     if (!entry) {
         DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
-        return 0;
+        raddr = 0;
+    } else {
+        raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
+            ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
     }
-    return (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
-        ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
+    mapcache_unlock();
+    return raddr;
 }
 
-void xen_invalidate_map_cache_entry(uint8_t *buffer)
+static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
 {
     MapCacheEntry *entry = NULL, *pentry = NULL;
     MapCacheRev *reventry;
@@ -383,6 +408,13 @@ void xen_invalidate_map_cache_entry(uint8_t *buffer)
     g_free(entry);
 }
 
+void xen_invalidate_map_cache_entry(uint8_t *buffer)
+{
+    mapcache_lock();
+    xen_invalidate_map_cache_entry_unlocked(buffer);
+    mapcache_unlock();
+}
+
 void xen_invalidate_map_cache(void)
 {
     unsigned long i;
@@ -391,14 +423,14 @@ void xen_invalidate_map_cache(void)
     /* Flush pending AIO before destroying the mapcache */
     bdrv_drain_all();
 
+    mapcache_lock();
+
     QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
         DPRINTF("There should be no locked mappings at this time, "
                 "but "TARGET_FMT_plx" -> %p is present\n",
                 reventry->paddr_index, reventry->vaddr_req);
     }
 
-    mapcache_lock();
-
     for (i = 0; i < mapcache->nr_buckets; i++) {
         MapCacheEntry *entry = &mapcache->entry[i];
--
1.8.3.1
Thread overview: 5+ messages
2015-01-14 10:20 [Qemu-devel] [RFH PATCH 0/2] xen-mapcache: fixes for unlocked memory access Paolo Bonzini
2015-01-14 10:20 ` [Qemu-devel] [RFH PATCH 1/2] xen: do not use __-named variables in mapcache Paolo Bonzini
2015-01-14 11:44 ` Stefano Stabellini
2015-01-14 10:20 ` Paolo Bonzini [this message]
2015-01-14 11:52 ` [Qemu-devel] [RFH PATCH 2/2] xen: add a lock for the mapcache Stefano Stabellini