From: David Engraf
Date: Tue, 24 Nov 2015 16:43:28 +0100
Message-ID: <565485A0.5090902@sysgo.com>
Subject: [Qemu-devel] [PATCH] qemu_mutex_iothread_locked not correctly synchronized
To: qemu-devel@nongnu.org
Cc: Paolo Bonzini, Fam Zheng

Commit afbe70535ff1a8a7a32910cc15ebecc0ba92e7da introduced the function
qemu_mutex_iothread_locked to avoid recursive locking of the iothread
lock. The iothread_locked variable uses the __thread attribute, which
gives each thread its own storage location, whereas qemu_global_mutex
is shared by all threads. This leads to race conditions because the
variable is not kept in sync between threads. I can reproduce this
problem reliably on a Windows machine, whereas Linux works fine.

Even after removing the __thread attribute, iothread_locked may still
report an invalid state, because some functions call qemu_cond_wait
directly, which unlocks and relocks qemu_global_mutex while waiting on
the condition. These calls must keep iothread_locked in sync as well.

This patch removes the __thread attribute from iothread_locked and adds
a helper function, qemu_cond_wait_iothread, which keeps iothread_locked
in sync across the wait. (A standalone sketch of the per-thread
visibility issue follows after the patch.)

Signed-off-by: David Engraf

---
 cpus.c | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

diff --git a/cpus.c b/cpus.c
index 877bd70..d7cdd11 100644
--- a/cpus.c
+++ b/cpus.c
@@ -70,6 +70,8 @@ static CPUState *next_cpu;
 int64_t max_delay;
 int64_t max_advance;
 
+static void qemu_cond_wait_iothread(QemuCond *cond);
+
 /* vcpu throttling controls */
 static QEMUTimer *throttle_timer;
 static unsigned int throttle_percentage;
@@ -920,7 +922,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     while (!atomic_mb_read(&wi.done)) {
         CPUState *self_cpu = current_cpu;
 
-        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_work_cond);
         current_cpu = self_cpu;
     }
 }
@@ -998,11 +1000,11 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
         /* Start accounting real time to the virtual clock if the CPUs are idle.
          */
         qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
-        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(cpu->halt_cond);
     }
 
     while (iothread_requesting_mutex) {
-        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_io_proceeded_cond);
     }
 
     CPU_FOREACH(cpu) {
@@ -1013,7 +1015,7 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
 static void qemu_kvm_wait_io_event(CPUState *cpu)
 {
     while (cpu_thread_is_idle(cpu)) {
-        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(cpu->halt_cond);
     }
 
     qemu_kvm_eat_signals(cpu);
@@ -1123,7 +1125,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 
     /* wait for initial kick-off after machine start */
     while (first_cpu->stopped) {
-        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(first_cpu->halt_cond);
 
         /* process any pending work */
         CPU_FOREACH(cpu) {
@@ -1208,13 +1210,20 @@ bool qemu_in_vcpu_thread(void)
     return current_cpu && qemu_cpu_is_self(current_cpu);
 }
 
-static __thread bool iothread_locked = false;
+static bool iothread_locked;
 
 bool qemu_mutex_iothread_locked(void)
 {
     return iothread_locked;
 }
 
+static void qemu_cond_wait_iothread(QemuCond *cond)
+{
+    iothread_locked = false;
+    qemu_cond_wait(cond, &qemu_global_mutex);
+    iothread_locked = true;
+}
+
 void qemu_mutex_lock_iothread(void)
 {
     atomic_inc(&iothread_requesting_mutex);
@@ -1277,7 +1286,7 @@ void pause_all_vcpus(void)
     }
 
     while (!all_vcpus_paused()) {
-        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_pause_cond);
         CPU_FOREACH(cpu) {
             qemu_cpu_kick(cpu);
         }
@@ -1326,7 +1335,7 @@ static void qemu_tcg_init_vcpu(CPUState *cpu)
         cpu->hThread = qemu_thread_get_handle(cpu->thread);
 #endif
         while (!cpu->created) {
-            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+            qemu_cond_wait_iothread(&qemu_cpu_cond);
         }
         tcg_cpu_thread = cpu->thread;
     } else {
@@ -1347,7 +1356,7 @@ static void qemu_kvm_start_vcpu(CPUState *cpu)
     qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                        cpu, QEMU_THREAD_JOINABLE);
     while (!cpu->created) {
-        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_cpu_cond);
     }
 }
 
@@ -1363,7 +1372,7 @@ static void qemu_dummy_start_vcpu(CPUState *cpu)
     qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn,
                        cpu, QEMU_THREAD_JOINABLE);
     while (!cpu->created) {
-        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_cpu_cond);
    }
 }
-- 
1.9.1
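
For readers unfamiliar with __thread semantics, the following is a
minimal standalone pthreads sketch of the visibility problem described
above. It is not QEMU code and all names in it are illustrative: a
per-thread flag set by the thread that takes a process-wide mutex is
invisible to every other thread, so another thread querying the lock
state only ever reads its own, never-touched copy.

/* race_sketch.c - illustrative only, not QEMU code.
 * Build: gcc race_sketch.c -o race_sketch -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;

/* __thread gives every thread its own independent copy, so this flag
 * cannot reflect the state of the shared mutex across threads. */
static __thread bool mutex_locked;

static void lock_global(void)
{
    pthread_mutex_lock(&global_mutex);
    mutex_locked = true;        /* updates only this thread's copy */
}

static void unlock_global(void)
{
    mutex_locked = false;
    pthread_mutex_unlock(&global_mutex);
}

static void *observer(void *arg)
{
    (void)arg;
    /* Runs while main() still holds global_mutex, yet prints 0,
     * because this thread's copy of mutex_locked was never set. */
    printf("observer sees mutex_locked = %d\n", mutex_locked);
    return NULL;
}

int main(void)
{
    pthread_t t;

    lock_global();              /* main's copy becomes true */
    pthread_create(&t, NULL, observer, NULL);
    pthread_join(&t, NULL);
    unlock_global();
    return 0;
}

With the plain global used in this patch, the observer would instead
see the flag set while the lock is held; the trade-off is that every
place that drops qemu_global_mutex, such as qemu_cond_wait, must then
clear and restore the flag, which is exactly what the
qemu_cond_wait_iothread helper does.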