qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Emilio G. Cota" <cota@braap.org>,
	KONRAD Frederic <fred.konrad@greensocs.com>
Subject: [Qemu-devel] [PULL 38/43] replace spinlock by QemuMutex.
Date: Wed,  9 Sep 2015 15:50:08 +0200	[thread overview]
Message-ID: <1441806613-13775-39-git-send-email-pbonzini@redhat.com> (raw)
In-Reply-To: <1441806613-13775-1-git-send-email-pbonzini@redhat.com>

From: KONRAD Frederic <fred.konrad@greensocs.com>

spinlock is only used in two cases:
  * cpu-exec.c: to protect TranslationBlock
  * mem_helper.c: for lock helper in target-i386 (which seems broken).

It's a pthread_mutex_t in user-mode, so we can use QemuMutex directly,
with an #ifdef.  The #ifdef will be removed when multithreaded TCG
needs the mutex as well.

Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
Message-Id: <1439220437-23957-5-git-send-email-fred.konrad@greensocs.com>
Signed-off-by: Emilio G. Cota <cota@braap.org>
[Merge Emilio G. Cota's patch to remove volatile. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 cpu-exec.c               | 14 +++-----------
 include/exec/exec-all.h  |  4 ++--
 linux-user/main.c        |  6 +++---
 target-i386/cpu.h        |  3 +++
 target-i386/mem_helper.c | 25 ++++++++++++++++++++++---
 target-i386/translate.c  |  2 ++
 tcg/tcg.h                |  4 ++++
 translate-all.c          | 34 ++++++++++++++++++++++++++++++++++
 8 files changed, 73 insertions(+), 19 deletions(-)

diff --git a/cpu-exec.c b/cpu-exec.c
index 6a30261..2c0a6f6 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -357,9 +357,6 @@ int cpu_exec(CPUState *cpu)
     uintptr_t next_tb;
     SyncClocks sc;
 
-    /* This must be volatile so it is not trashed by longjmp() */
-    volatile bool have_tb_lock = false;
-
     if (cpu->halted) {
         if (!cpu_has_work(cpu)) {
             return EXCP_HALTED;
@@ -468,8 +465,7 @@ int cpu_exec(CPUState *cpu)
                     cpu->exception_index = EXCP_INTERRUPT;
                     cpu_loop_exit(cpu);
                 }
-                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
-                have_tb_lock = true;
+                tb_lock();
                 tb = tb_find_fast(cpu);
                 /* Note: we do it here to avoid a gcc bug on Mac OS X when
                    doing it in tb_find_slow */
@@ -491,8 +487,7 @@ int cpu_exec(CPUState *cpu)
                     tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                 next_tb & TB_EXIT_MASK, tb);
                 }
-                have_tb_lock = false;
-                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
+                tb_unlock();
                 if (likely(!cpu->exit_request)) {
                     trace_exec_tb(tb, tb->pc);
                     tc_ptr = tb->tc_ptr;
@@ -558,10 +553,7 @@ int cpu_exec(CPUState *cpu)
             x86_cpu = X86_CPU(cpu);
             env = &x86_cpu->env;
 #endif
-            if (have_tb_lock) {
-                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
-                have_tb_lock = false;
-            }
+            tb_lock_reset();
         }
     } /* for(;;) */
 
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 4a09e6c..59544d4 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -225,7 +225,7 @@ struct TranslationBlock {
     struct TranslationBlock *jmp_first;
 };
 
-#include "exec/spinlock.h"
+#include "qemu/thread.h"
 
 typedef struct TBContext TBContext;
 
@@ -235,7 +235,7 @@ struct TBContext {
     TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
     int nb_tbs;
     /* any access to the tbs or the page table must use this lock */
-    spinlock_t tb_lock;
+    QemuMutex tb_lock;
 
     /* statistics */
     int tb_flush_count;
diff --git a/linux-user/main.c b/linux-user/main.c
index 2c9658e..1cecc4c 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -105,7 +105,7 @@ static int pending_cpus;
 /* Make sure everything is in a consistent state for calling fork().  */
 void fork_start(void)
 {
-    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
     pthread_mutex_lock(&exclusive_lock);
     mmap_fork_start();
 }
@@ -127,11 +127,11 @@ void fork_end(int child)
         pthread_mutex_init(&cpu_list_mutex, NULL);
         pthread_cond_init(&exclusive_cond, NULL);
         pthread_cond_init(&exclusive_resume, NULL);
-        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
+        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         gdbserver_fork(thread_cpu);
     } else {
         pthread_mutex_unlock(&exclusive_lock);
-        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
     }
 }
 
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 74b674d..0337838 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -1318,6 +1318,9 @@ static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
 void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
 void cpu_set_fpuc(CPUX86State *env, uint16_t val);
 
+/* mem_helper.c */
+void helper_lock_init(void);
+
 /* svm_helper.c */
 void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                    uint64_t param);
diff --git a/target-i386/mem_helper.c b/target-i386/mem_helper.c
index 1aec8a5..8bf0da2 100644
--- a/target-i386/mem_helper.c
+++ b/target-i386/mem_helper.c
@@ -23,18 +23,37 @@
 
 /* broken thread support */
 
-static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
+#if defined(CONFIG_USER_ONLY)
+QemuMutex global_cpu_lock;
 
 void helper_lock(void)
 {
-    spin_lock(&global_cpu_lock);
+    qemu_mutex_lock(&global_cpu_lock);
 }
 
 void helper_unlock(void)
 {
-    spin_unlock(&global_cpu_lock);
+    qemu_mutex_unlock(&global_cpu_lock);
 }
 
+void helper_lock_init(void)
+{
+    qemu_mutex_init(&global_cpu_lock);
+}
+#else
+void helper_lock(void)
+{
+}
+
+void helper_unlock(void)
+{
+}
+
+void helper_lock_init(void)
+{
+}
+#endif
+
 void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
 {
     uint64_t d;
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 82e2245..443bf60 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -7899,6 +7899,8 @@ void optimize_flags_init(void)
                                          offsetof(CPUX86State, regs[i]),
                                          reg_names[i]);
     }
+
+    helper_lock_init();
 }
 
 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
diff --git a/tcg/tcg.h b/tcg/tcg.h
index f437824..aa295b9 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -595,6 +595,10 @@ void *tcg_malloc_internal(TCGContext *s, int size);
 void tcg_pool_reset(TCGContext *s);
 void tcg_pool_delete(TCGContext *s);
 
+void tb_lock(void);
+void tb_unlock(void);
+void tb_lock_reset(void);
+
 static inline void *tcg_malloc(int size)
 {
     TCGContext *s = &tcg_ctx;
diff --git a/translate-all.c b/translate-all.c
index a75aeed..37bb56c 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -128,6 +128,39 @@ static void *l1_map[V_L1_SIZE];
 /* code generation context */
 TCGContext tcg_ctx;
 
+/* translation block context */
+#ifdef CONFIG_USER_ONLY
+__thread int have_tb_lock;
+#endif
+
+void tb_lock(void)
+{
+#ifdef CONFIG_USER_ONLY
+    assert(!have_tb_lock);
+    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+    have_tb_lock++;
+#endif
+}
+
+void tb_unlock(void)
+{
+#ifdef CONFIG_USER_ONLY
+    assert(have_tb_lock);
+    have_tb_lock--;
+    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+#endif
+}
+
+void tb_lock_reset(void)
+{
+#ifdef CONFIG_USER_ONLY
+    if (have_tb_lock) {
+        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        have_tb_lock = 0;
+    }
+#endif
+}
+
 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                          tb_page_addr_t phys_page2);
 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
@@ -675,6 +708,7 @@ static inline void code_gen_alloc(size_t tb_size)
             CODE_GEN_AVG_BLOCK_SIZE;
     tcg_ctx.tb_ctx.tbs =
             g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
+    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
 }
 
 /* Must be called before using the QEMU cpus. 'tb_size' is the size
-- 
2.4.3

  parent reply	other threads:[~2015-09-09 13:51 UTC|newest]

Thread overview: 48+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-09-09 13:49 [Qemu-devel] [PULL 00/43] First batch of misc changes for 2.5 (2015-09-09) Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 01/43] qemu-thread: handle spurious futex_wait wakeups Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 02/43] seqlock: add missing 'inline' to seqlock_read_retry Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 03/43] seqlock: read sequence number atomically Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 04/43] cpus.c: qemu_mutex_lock_iothread fix race condition at cpu thread init Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 05/43] iohandler: Use aio API Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 06/43] block/iscsi: validate block size returned from target Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 07/43] Added generic panic handler qemu_system_guest_panicked() Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 08/43] i8257: rewrite DMA_schedule to avoid hooking into the CPU loop Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 09/43] i8257: remove cpu_request_exit irq Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 10/43] tcg: introduce tcg_current_cpu Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 11/43] remove qemu/tls.h Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 12/43] tcg: assign cpu->current_tb in a simpler place Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 13/43] tcg: synchronize cpu->exit_request and cpu->tcg_exit_req accesses Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 14/43] tcg: synchronize exit_request and tcg_current_cpu accesses Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 15/43] use qemu_cpu_kick instead of cpu_exit or qemu_cpu_kick_thread Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 16/43] tcg: signal-free qemu_cpu_kick Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 17/43] Move RAMBlock and ram_list to ram_addr.h Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 18/43] Makefile.target: include top level build dir in vpath Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 19/43] rcu: init rcu_registry_lock after fork Paolo Bonzini
2015-09-16 12:37   ` Gerd Hoffmann
2015-09-16 12:38     ` Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 20/43] rcu: fix comment with s/rcu_gp_lock/rcu_registry_lock/ Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 21/43] linux-user: call rcu_(un)register_thread on pthread_(exit|create) Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 22/43] translate-all: remove obsolete comment about l1_map Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 23/43] cutils: Add qemu_strtol() wrapper Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 24/43] cutils: Add qemu_strtoul() wrapper Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 25/43] cutils: Add qemu_strtoll() wrapper Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 26/43] cutils: Add qemu_strtoull() wrapper Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 27/43] qmp: Add example usage of strto*l() qemu wrapper Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 28/43] CODING_STYLE: update mixed declaration rules Paolo Bonzini
2015-09-09 13:49 ` [Qemu-devel] [PULL 29/43] checkpatch: adapt some tests to QEMU Paolo Bonzini
2015-09-09 13:50 ` [Qemu-devel] [PULL 30/43] checkpatch: remove tests that are not relevant outside the kernel Paolo Bonzini
2015-09-09 13:50 ` [Qemu-devel] [PULL 31/43] vhost-scsi: fix wrong vhost-scsi firmware path Paolo Bonzini
2015-09-09 13:50 ` [Qemu-devel] [PULL 32/43] configure: factor out adding disas configure Paolo Bonzini
2015-09-09 13:50 ` [Qemu-devel] [PULL 33/43] add macro file for coccinelle Paolo Bonzini
2015-09-09 13:50 ` [Qemu-devel] [PULL 34/43] configure: Add support for jemalloc Paolo Bonzini
2015-09-09 13:50 ` [Qemu-devel] [PULL 35/43] scripts/dump-guest-memory.py: fix after RAMBlock change Paolo Bonzini
2015-09-09 13:50 ` [Qemu-devel] [PULL 36/43] cpus: protect work list with work_mutex Paolo Bonzini
2015-09-09 13:50 ` [Qemu-devel] [PULL 37/43] cpus: remove tcg_halt_cond and tcg_cpu_thread globals Paolo Bonzini
2015-09-09 13:50 ` Paolo Bonzini [this message]
2015-09-09 13:50 ` [Qemu-devel] [PULL 39/43] remove unused spinlock Paolo Bonzini
2015-09-09 13:50 ` [Qemu-devel] [PULL 40/43] tcg: add memory barriers in page_find_alloc accesses Paolo Bonzini
2015-09-09 13:50 ` [Qemu-devel] [PULL 41/43] tcg: comment on which functions have to be called with mmap_lock held Paolo Bonzini
2015-09-09 13:50 ` [Qemu-devel] [PULL 42/43] exec: make mmap_lock/mmap_unlock globally available Paolo Bonzini
2015-09-09 13:50 ` [Qemu-devel] [PULL 43/43] cpu-exec: fix lock hierarchy for user-mode emulation Paolo Bonzini
2015-09-09 18:41 ` [Qemu-devel] [PULL 00/43] First batch of misc changes for 2.5 (2015-09-09) Peter Maydell
2015-09-09 19:25   ` Paolo Bonzini

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1441806613-13775-39-git-send-email-pbonzini@redhat.com \
    --to=pbonzini@redhat.com \
    --cc=cota@braap.org \
    --cc=fred.konrad@greensocs.com \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).