From: Fam Zheng <famz@redhat.com>
To: qemu-devel@nongnu.org
Cc: Paolo Bonzini <pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH v4 3/5] exec: Protect map_client_list with mutex
Date: Mon, 16 Mar 2015 17:03:35 +0800
Message-ID: <1426496617-10702-4-git-send-email-famz@redhat.com>
In-Reply-To: <1426496617-10702-1-git-send-email-famz@redhat.com>
So that accesses from multiple threads are safe.
Signed-off-by: Fam Zheng <famz@redhat.com>
---
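
A note for reviewers: the pattern applied here is simply to take a
list-wide mutex around every insertion into, and every drain of,
map_client_list. Below is a minimal standalone sketch of the same
idea, using pthread_mutex_t and the BSD <sys/queue.h> LIST macros in
place of QEMU's QemuMutex and QLIST; all names in it are illustrative,
not QEMU API.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    typedef struct Client {
        void (*callback)(void *opaque);
        void *opaque;
        LIST_ENTRY(Client) link;
    } Client;

    static pthread_mutex_t client_list_lock = PTHREAD_MUTEX_INITIALIZER;
    static LIST_HEAD(ClientList, Client) client_list =
        LIST_HEAD_INITIALIZER(client_list);

    static void *register_client(void *opaque, void (*callback)(void *))
    {
        Client *c = malloc(sizeof(*c));

        c->opaque = opaque;
        c->callback = callback;
        pthread_mutex_lock(&client_list_lock);    /* protect the insert */
        LIST_INSERT_HEAD(&client_list, c, link);
        pthread_mutex_unlock(&client_list_lock);
        return c;
    }

    static void notify_clients(void)
    {
        pthread_mutex_lock(&client_list_lock);    /* drain under the lock */
        while (!LIST_EMPTY(&client_list)) {
            Client *c = LIST_FIRST(&client_list);
            c->callback(c->opaque);
            LIST_REMOVE(c, link);
            free(c);
        }
        pthread_mutex_unlock(&client_list_lock);
    }

    static void hello(void *opaque)
    {
        printf("notified: %s\n", (const char *)opaque);
    }

    int main(void)
    {
        register_client("client A", hello);
        register_client("client B", hello);
        notify_clients();    /* runs both callbacks, LIFO order */
        return 0;
    }

As in the patch, the sketch invokes the callbacks with the lock held,
so a callback must not call back into register_client (the mutex is
not recursive).
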
exec.c | 24 +++++++++++++++---------
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/exec.c b/exec.c
index 8d21eff..9f6c4f8 100644
--- a/exec.c
+++ b/exec.c
@@ -429,15 +429,6 @@ address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
 }
 #endif
 
-void cpu_exec_init_all(void)
-{
-#if !defined(CONFIG_USER_ONLY)
-    qemu_mutex_init(&ram_list.mutex);
-    memory_map_init();
-    io_mem_init();
-#endif
-}
-
 #if !defined(CONFIG_USER_ONLY)
 
 static int cpu_common_post_load(void *opaque, int version_id)
@@ -2494,6 +2485,7 @@ typedef struct MapClient {
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
+QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
@@ -2501,12 +2493,24 @@ void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
 {
     MapClient *client = g_malloc(sizeof(*client));
 
+    qemu_mutex_lock(&map_client_list_lock);
     client->opaque = opaque;
     client->callback = callback;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
+    qemu_mutex_unlock(&map_client_list_lock);
     return client;
 }
 
+void cpu_exec_init_all(void)
+{
+#if !defined(CONFIG_USER_ONLY)
+    qemu_mutex_init(&ram_list.mutex);
+    memory_map_init();
+    io_mem_init();
+#endif
+    qemu_mutex_init(&map_client_list_lock);
+}
+
 static void cpu_unregister_map_client(void *_client)
 {
     MapClient *client = (MapClient *)_client;
@@ -2519,11 +2523,13 @@ static void cpu_notify_map_clients(void)
 {
     MapClient *client;
 
+    qemu_mutex_lock(&map_client_list_lock);
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
         client->callback(client->opaque);
         cpu_unregister_map_client(client);
     }
+    qemu_mutex_unlock(&map_client_list_lock);
 }
 
 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
--
1.9.3