From: David Engraf <david.engraf@sysgo.com>
To: qemu-devel@nongnu.org
Cc: Paolo Bonzini <pbonzini@redhat.com>, Fam Zheng <famz@redhat.com>
Subject: [Qemu-devel] [PATCH] qemu_mutex_iothread_locked not correctly synchronized
Date: Tue, 24 Nov 2015 16:43:28 +0100
Message-ID: <565485A0.5090902@sysgo.com>

Commit afbe70535ff1a8a7a32910cc15ebecc0ba92e7da introduced the function
qemu_mutex_iothread_locked to avoid recursive locking of the iothread
lock. The iothread_locked variable uses the __thread attribute, which
gives every thread its own copy of the variable, whereas
qemu_global_mutex is shared by all threads. This leads to race
conditions because the per-thread copies are never kept in sync. I can
reproduce the problem reliably on a Windows machine, whereas Linux
works fine.
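
(Illustration only, not part of the patch: a minimal sketch of why a
__thread flag cannot mirror the state of a mutex that is shared by all
threads. The names "worker" and "observer_sees_lock" are made up for
the example.)

    /* Each thread gets its own copy of 'locked', so the store made by
     * the thread that takes the shared mutex is invisible to every
     * other thread. */
    static __thread bool locked;
    static QemuMutex shared_mutex;

    static void worker(void)
    {
        qemu_mutex_lock(&shared_mutex);
        locked = true;   /* updates the worker thread's copy only */
    }

    static bool observer_sees_lock(void)
    {
        return locked;   /* this thread's copy is still false, even
                            while the worker holds the mutex */
    }
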
After removing the __thread attribute, iothread_locked may still
report an incorrect state, because some functions call qemu_cond_wait
directly, and qemu_cond_wait unlocks and re-locks qemu_global_mutex
while waiting on the condition. These call sites must update
iothread_locked as well.
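
(Again for illustration: qemu_cond_wait, like pthread_cond_wait,
atomically releases the mutex while the caller sleeps and re-acquires
it before returning, so a flag that is only set in
qemu_mutex_lock_iothread is wrong for the whole duration of the wait.)

    qemu_mutex_lock(&qemu_global_mutex);
    iothread_locked = true;
    /* While blocked in the wait below, this thread does NOT hold the
     * mutex, yet qemu_mutex_iothread_locked() would keep returning
     * true unless the flag is cleared before the wait and set again
     * afterwards. */
    qemu_cond_wait(cond, &qemu_global_mutex);
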
The patch removes the __thread attribute from iothread_locked and adds
a helper function, qemu_cond_wait_iothread, to keep iothread_locked in
sync.

Signed-off-by: David Engraf <david.engraf@sysgo.com>
---
cpus.c | 29 +++++++++++++++++++----------
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/cpus.c b/cpus.c
index 877bd70..d7cdd11 100644
--- a/cpus.c
+++ b/cpus.c
@@ -70,6 +70,8 @@ static CPUState *next_cpu;
 int64_t max_delay;
 int64_t max_advance;
 
+static void qemu_cond_wait_iothread(QemuCond *cond);
+
 /* vcpu throttling controls */
 static QEMUTimer *throttle_timer;
 static unsigned int throttle_percentage;
@@ -920,7 +922,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     while (!atomic_mb_read(&wi.done)) {
         CPUState *self_cpu = current_cpu;
 
-        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_work_cond);
         current_cpu = self_cpu;
     }
 }
@@ -998,11 +1000,11 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
         /* Start accounting real time to the virtual clock if the CPUs
            are idle.  */
         qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
-        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(cpu->halt_cond);
     }
 
     while (iothread_requesting_mutex) {
-        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_io_proceeded_cond);
     }
 
     CPU_FOREACH(cpu) {
@@ -1013,7 +1015,7 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
 static void qemu_kvm_wait_io_event(CPUState *cpu)
 {
     while (cpu_thread_is_idle(cpu)) {
-        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(cpu->halt_cond);
     }
 
     qemu_kvm_eat_signals(cpu);
@@ -1123,7 +1125,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 
     /* wait for initial kick-off after machine start */
     while (first_cpu->stopped) {
-        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(first_cpu->halt_cond);
 
         /* process any pending work */
         CPU_FOREACH(cpu) {
@@ -1208,13 +1210,20 @@ bool qemu_in_vcpu_thread(void)
     return current_cpu && qemu_cpu_is_self(current_cpu);
 }
 
-static __thread bool iothread_locked = false;
+static bool iothread_locked;
 
 bool qemu_mutex_iothread_locked(void)
 {
     return iothread_locked;
 }
 
+static void qemu_cond_wait_iothread(QemuCond *cond)
+{
+    iothread_locked = false;
+    qemu_cond_wait(cond, &qemu_global_mutex);
+    iothread_locked = true;
+}
+
 void qemu_mutex_lock_iothread(void)
 {
     atomic_inc(&iothread_requesting_mutex);
@@ -1277,7 +1286,7 @@ void pause_all_vcpus(void)
     }
 
     while (!all_vcpus_paused()) {
-        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_pause_cond);
         CPU_FOREACH(cpu) {
             qemu_cpu_kick(cpu);
         }
@@ -1326,7 +1335,7 @@ static void qemu_tcg_init_vcpu(CPUState *cpu)
         cpu->hThread = qemu_thread_get_handle(cpu->thread);
 #endif
         while (!cpu->created) {
-            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+            qemu_cond_wait_iothread(&qemu_cpu_cond);
         }
         tcg_cpu_thread = cpu->thread;
     } else {
@@ -1347,7 +1356,7 @@ static void qemu_kvm_start_vcpu(CPUState *cpu)
     qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                        cpu, QEMU_THREAD_JOINABLE);
     while (!cpu->created) {
-        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_cpu_cond);
     }
 }
 
@@ -1363,7 +1372,7 @@ static void qemu_dummy_start_vcpu(CPUState *cpu)
     qemu_thread_create(cpu->thread, thread_name,
                        qemu_dummy_cpu_thread_fn, cpu,
                        QEMU_THREAD_JOINABLE);
     while (!cpu->created) {
-        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_cpu_cond);
     }
 }
--
1.9.1