From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Sergey Fedorov" <serge.fdrv@gmail.com>,
"Alex Bennée" <alex.bennee@linaro.org>,
"Sergey Fedorov" <sergey.fedorov@linaro.org>,
"Emilio G. Cota" <cota@braap.org>
Subject: [Qemu-devel] [PATCH 04/12] linux-user: Use QemuMutex and QemuCond
Date: Thu, 1 Sep 2016 12:20:19 +0200 [thread overview]
Message-ID: <1472725227-10374-5-git-send-email-pbonzini@redhat.com> (raw)
In-Reply-To: <1472725227-10374-1-git-send-email-pbonzini@redhat.com>
From: Sergey Fedorov <serge.fdrv@gmail.com>
Convert pthread_mutex_t and pthread_cond_t to QemuMutex and QemuCond.
This will allow some locks and condition variables to be shared
between user-mode and system-mode emulation.
Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <1470158864-17651-7-git-send-email-alex.bennee@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
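The QEMU primitives map one-to-one onto the pthread calls; the main practical
difference is that QemuMutex and QemuCond have no static initializers, which
is why the patch adds the qemu_init_cpu_loop() hook below. A minimal sketch of
the API for reference (the names in this example are made up; only the
qemu/thread.h calls are real):

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    static QemuMutex lock;   /* was: pthread_mutex_t + PTHREAD_MUTEX_INITIALIZER */
    static QemuCond cond;    /* was: pthread_cond_t + PTHREAD_COND_INITIALIZER */
    static bool ready;

    static void sync_init(void)
    {
        /* No static initializers, so call this once before first use. */
        qemu_mutex_init(&lock);
        qemu_cond_init(&cond);
    }

    static void wait_until_ready(void)
    {
        qemu_mutex_lock(&lock);
        while (!ready) {
            /* Atomically drops and re-takes the mutex, like pthread_cond_wait. */
            qemu_cond_wait(&cond, &lock);
        }
        qemu_mutex_unlock(&lock);
    }

    static void mark_ready(void)
    {
        qemu_mutex_lock(&lock);
        ready = true;
        qemu_cond_broadcast(&cond);
        qemu_mutex_unlock(&lock);
    }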
linux-user/main.c | 53 +++++++++++++++++++++++++++++++----------------------
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/linux-user/main.c b/linux-user/main.c
index f2f4d2f..12a5475 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -111,17 +111,25 @@ int cpu_get_pic_interrupt(CPUX86State *env)
We don't require a full sync, only that no cpus are executing guest code.
The alternative is to map target atomic ops onto host equivalents,
which requires quite a lot of per host/target work. */
-static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
-static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
+static QemuMutex cpu_list_mutex;
+static QemuMutex exclusive_lock;
+static QemuCond exclusive_cond;
+static QemuCond exclusive_resume;
static int pending_cpus;
+void qemu_init_cpu_loop(void)
+{
+ qemu_mutex_init(&cpu_list_mutex);
+ qemu_mutex_init(&exclusive_lock);
+ qemu_cond_init(&exclusive_cond);
+ qemu_cond_init(&exclusive_resume);
+}
+
/* Make sure everything is in a consistent state for calling fork(). */
void fork_start(void)
{
qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
- pthread_mutex_lock(&exclusive_lock);
+ qemu_mutex_lock(&exclusive_lock);
mmap_fork_start();
}
@@ -138,14 +146,14 @@ void fork_end(int child)
}
}
pending_cpus = 0;
- pthread_mutex_init(&exclusive_lock, NULL);
- pthread_mutex_init(&cpu_list_mutex, NULL);
- pthread_cond_init(&exclusive_cond, NULL);
- pthread_cond_init(&exclusive_resume, NULL);
+ qemu_mutex_init(&exclusive_lock);
+ qemu_mutex_init(&cpu_list_mutex);
+ qemu_cond_init(&exclusive_cond);
+ qemu_cond_init(&exclusive_resume);
qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
gdbserver_fork(thread_cpu);
} else {
- pthread_mutex_unlock(&exclusive_lock);
+ qemu_mutex_unlock(&exclusive_lock);
qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
}
}
@@ -155,7 +163,7 @@ void fork_end(int child)
static inline void exclusive_idle(void)
{
while (pending_cpus) {
- pthread_cond_wait(&exclusive_resume, &exclusive_lock);
+ qemu_cond_wait(&exclusive_resume, &exclusive_lock);
}
}
@@ -165,7 +173,7 @@ static inline void start_exclusive(void)
{
CPUState *other_cpu;
- pthread_mutex_lock(&exclusive_lock);
+ qemu_mutex_lock(&exclusive_lock);
exclusive_idle();
pending_cpus = 1;
@@ -177,7 +185,7 @@ static inline void start_exclusive(void)
}
}
if (pending_cpus > 1) {
- pthread_cond_wait(&exclusive_cond, &exclusive_lock);
+ qemu_cond_wait(&exclusive_cond, &exclusive_lock);
}
}
@@ -185,42 +193,42 @@ static inline void start_exclusive(void)
static inline void __attribute__((unused)) end_exclusive(void)
{
pending_cpus = 0;
- pthread_cond_broadcast(&exclusive_resume);
- pthread_mutex_unlock(&exclusive_lock);
+ qemu_cond_broadcast(&exclusive_resume);
+ qemu_mutex_unlock(&exclusive_lock);
}
/* Wait for exclusive ops to finish, and begin cpu execution. */
static inline void cpu_exec_start(CPUState *cpu)
{
- pthread_mutex_lock(&exclusive_lock);
+ qemu_mutex_lock(&exclusive_lock);
exclusive_idle();
cpu->running = true;
- pthread_mutex_unlock(&exclusive_lock);
+ qemu_mutex_unlock(&exclusive_lock);
}
/* Mark cpu as not executing, and release pending exclusive ops. */
static inline void cpu_exec_end(CPUState *cpu)
{
- pthread_mutex_lock(&exclusive_lock);
+ qemu_mutex_lock(&exclusive_lock);
cpu->running = false;
if (pending_cpus > 1) {
pending_cpus--;
if (pending_cpus == 1) {
- pthread_cond_signal(&exclusive_cond);
+ qemu_cond_signal(&exclusive_cond);
}
}
exclusive_idle();
- pthread_mutex_unlock(&exclusive_lock);
+ qemu_mutex_unlock(&exclusive_lock);
}
void cpu_list_lock(void)
{
- pthread_mutex_lock(&cpu_list_mutex);
+ qemu_mutex_lock(&cpu_list_mutex);
}
void cpu_list_unlock(void)
{
- pthread_mutex_unlock(&cpu_list_mutex);
+ qemu_mutex_unlock(&cpu_list_mutex);
}
@@ -4222,6 +4230,7 @@ int main(int argc, char **argv, char **envp)
int ret;
int execfd;
+ qemu_init_cpu_loop();
module_call_init(MODULE_INIT_QOM);
if ((envlist = envlist_create()) == NULL) {
--
2.7.4
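For context, a rough sketch of how the per-target cpu_loop code is expected to
use the helpers converted above (trapnr_needs_exclusive() is a hypothetical
placeholder for the target-specific check; everything else is the API shown in
the patch):

    static void cpu_loop_sketch(CPUState *cpu)
    {
        int trapnr;

        for (;;) {
            cpu_exec_start(cpu);     /* count this cpu as executing guest code */
            trapnr = cpu_exec(cpu);  /* run guest code until it traps */
            cpu_exec_end(cpu);       /* release any pending exclusive request */

            if (trapnr_needs_exclusive(trapnr)) {
                start_exclusive();   /* wait until no other cpu runs guest code */
                /* ... emulate the operation that must not race here ... */
                end_exclusive();     /* wake cpus blocked in exclusive_idle() */
            }
        }
    }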