From: "Alex Bennée" <alex.bennee@linaro.org>
To: mttcg@listserver.greensocs.com, qemu-devel@nongnu.org,
	fred.konrad@greensocs.com, a.rigo@virtualopensystems.com,
	serge.fdrv@gmail.com, cota@braap.org, bobby.prani@gmail.com
Cc: mark.burton@greensocs.com, pbonzini@redhat.com,
	jan.kiszka@siemens.com, rth@twiddle.net,
	peter.maydell@linaro.org, claudio.fontana@huawei.com,
	"Sergey Fedorov" <sergey.fedorov@linaro.org>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Riku Voipio" <riku.voipio@iki.fi>
Subject: [Qemu-devel] [PATCH v5 06/13] linux-user: Use QemuMutex and QemuCond
Date: Tue,  2 Aug 2016 18:27:37 +0100	[thread overview]
Message-ID: <1470158864-17651-7-git-send-email-alex.bennee@linaro.org> (raw)
In-Reply-To: <1470158864-17651-1-git-send-email-alex.bennee@linaro.org>

From: Sergey Fedorov <serge.fdrv@gmail.com>

Convert pthread_mutex_t and pthread_cond_t to QemuMutex and QemuCond.
This will allow some locks and condition variables to be made common
between user and system mode emulation.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
 linux-user/main.c | 53 +++++++++++++++++++++++++++++++----------------------
 1 file changed, 31 insertions(+), 22 deletions(-)
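
A minimal sketch of the conversion pattern for reviewers (not part of
the commit; the lock and function names below are made up for
illustration, but the qemu_mutex_*/qemu_cond_* calls are the same ones
used in the hunks that follow). Unlike PTHREAD_MUTEX_INITIALIZER /
PTHREAD_COND_INITIALIZER, QemuMutex and QemuCond have no static
initializer, which is why an explicit init hook such as
qemu_init_cpu_loop() has to run before first use, e.g. early in main():

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    /* hypothetical lock/condvar pair, for illustration only */
    static QemuMutex example_lock;
    static QemuCond example_cond;

    static void example_init(void)
    {
        /* must be called once before any lock/wait */
        qemu_mutex_init(&example_lock);
        qemu_cond_init(&example_cond);
    }

    static void example_wait(bool *flag)
    {
        qemu_mutex_lock(&example_lock);
        while (!*flag) {
            /* drops the mutex while sleeping, re-acquires on wake */
            qemu_cond_wait(&example_cond, &example_lock);
        }
        qemu_mutex_unlock(&example_lock);
    }
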

diff --git a/linux-user/main.c b/linux-user/main.c
index 462e820..c80caeb 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -111,17 +111,25 @@ int cpu_get_pic_interrupt(CPUX86State *env)
    We don't require a full sync, only that no cpus are executing guest code.
    The alternative is to map target atomic ops onto host equivalents,
    which requires quite a lot of per host/target work.  */
-static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
-static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
+static QemuMutex cpu_list_mutex;
+static QemuMutex exclusive_lock;
+static QemuCond exclusive_cond;
+static QemuCond exclusive_resume;
 static int pending_cpus;
 
+void qemu_init_cpu_loop(void)
+{
+    qemu_mutex_init(&cpu_list_mutex);
+    qemu_mutex_init(&exclusive_lock);
+    qemu_cond_init(&exclusive_cond);
+    qemu_cond_init(&exclusive_resume);
+}
+
 /* Make sure everything is in a consistent state for calling fork().  */
 void fork_start(void)
 {
     qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     mmap_fork_start();
 }
 
@@ -138,14 +146,14 @@ void fork_end(int child)
             }
         }
         pending_cpus = 0;
-        pthread_mutex_init(&exclusive_lock, NULL);
-        pthread_mutex_init(&cpu_list_mutex, NULL);
-        pthread_cond_init(&exclusive_cond, NULL);
-        pthread_cond_init(&exclusive_resume, NULL);
+        qemu_mutex_init(&exclusive_lock);
+        qemu_mutex_init(&cpu_list_mutex);
+        qemu_cond_init(&exclusive_cond);
+        qemu_cond_init(&exclusive_resume);
         qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         gdbserver_fork(thread_cpu);
     } else {
-        pthread_mutex_unlock(&exclusive_lock);
+        qemu_mutex_unlock(&exclusive_lock);
         qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
     }
 }
@@ -155,7 +163,7 @@ void fork_end(int child)
 static inline void exclusive_idle(void)
 {
     while (pending_cpus) {
-        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
+        qemu_cond_wait(&exclusive_resume, &exclusive_lock);
     }
 }
 
@@ -165,7 +173,7 @@ static inline void start_exclusive(void)
 {
     CPUState *other_cpu;
 
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     exclusive_idle();
 
     pending_cpus = 1;
@@ -177,7 +185,7 @@ static inline void start_exclusive(void)
         }
     }
     if (pending_cpus > 1) {
-        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
+        qemu_cond_wait(&exclusive_cond, &exclusive_lock);
     }
 }
 
@@ -185,42 +193,42 @@ static inline void start_exclusive(void)
 static inline void __attribute__((unused)) end_exclusive(void)
 {
     pending_cpus = 0;
-    pthread_cond_broadcast(&exclusive_resume);
-    pthread_mutex_unlock(&exclusive_lock);
+    qemu_cond_broadcast(&exclusive_resume);
+    qemu_mutex_unlock(&exclusive_lock);
 }
 
 /* Wait for exclusive ops to finish, and begin cpu execution.  */
 static inline void cpu_exec_start(CPUState *cpu)
 {
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     exclusive_idle();
     cpu->running = true;
-    pthread_mutex_unlock(&exclusive_lock);
+    qemu_mutex_unlock(&exclusive_lock);
 }
 
 /* Mark cpu as not executing, and release pending exclusive ops.  */
 static inline void cpu_exec_end(CPUState *cpu)
 {
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     cpu->running = false;
     if (pending_cpus > 1) {
         pending_cpus--;
         if (pending_cpus == 1) {
-            pthread_cond_signal(&exclusive_cond);
+            qemu_cond_signal(&exclusive_cond);
         }
     }
     exclusive_idle();
-    pthread_mutex_unlock(&exclusive_lock);
+    qemu_mutex_unlock(&exclusive_lock);
 }
 
 void cpu_list_lock(void)
 {
-    pthread_mutex_lock(&cpu_list_mutex);
+    qemu_mutex_lock(&cpu_list_mutex);
 }
 
 void cpu_list_unlock(void)
 {
-    pthread_mutex_unlock(&cpu_list_mutex);
+    qemu_mutex_unlock(&cpu_list_mutex);
 }
 
 
@@ -4222,6 +4230,7 @@ int main(int argc, char **argv, char **envp)
     int ret;
     int execfd;
 
+    qemu_init_cpu_loop();
     module_call_init(MODULE_INIT_QOM);
 
     if ((envlist = envlist_create()) == NULL) {
-- 
2.7.4
