From: Pavel Dovgalyuk <Pavel.Dovgaluk@ispras.ru>
To: qemu-devel@nongnu.org
Cc: kwolf@redhat.com, peter.maydell@linaro.org,
boost.lists@gmail.com, quintela@redhat.com, jasowang@redhat.com,
mst@redhat.com, zuban32s@gmail.com,
maria.klimushenkova@ispras.ru, dovgaluk@ispras.ru,
kraxel@redhat.com, pavel.dovgaluk@ispras.ru, pbonzini@redhat.com,
alex.bennee@linaro.org
Subject: [Qemu-devel] [RFC PATCH v5 11/24] cpus: push BQL lock to qemu_*_wait_io_event
Date: Tue, 23 Jan 2018 11:54:21 +0300
Message-ID: <20180123085421.3419.45127.stgit@pasha-VirtualBox>
In-Reply-To: <20180123085319.3419.97865.stgit@pasha-VirtualBox>
From: Alex Bennée <alex.bennee@linaro.org>
We only really need to take the BQL for initial setup (so we don't
race with the thread that spawned us). After that we can drop the lock
for the whole main loop and only take it while waiting for IO events.
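For orientation, here is a minimal sketch of the shape the vCPU thread
functions take after this patch. It is abbreviated from the hunks below;
the function name and the elided bodies are illustrative, not exact code:

/* Sketch only: BQL held for setup, dropped for the whole main loop. */
static void *cpu_thread_fn(void *arg)   /* illustrative name */
{
    CPUState *cpu = arg;

    qemu_mutex_lock_iothread();          /* lock held only during setup */
    /* ... per-accelerator vCPU init ... */
    cpu->created = true;
    qemu_mutex_unlock_iothread();        /* moved up: loop runs lock-free */
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        /* ... run guest code without holding the BQL ... */
        qemu_wait_io_event(cpu);         /* takes and drops the BQL itself */
    } while (!cpu->unplug);              /* exit condition abbreviated */

    return NULL;
}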
There is a slight wrinkle for the round-robin TCG thread: it also
expires timers, which must be done under the BQL as they run in the
main-loop context.
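Reconstructed from the hunks below, the round-robin wait helper
therefore takes the BQL around the whole wait, including the kick-timer
manipulation:

static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
{
    qemu_mutex_lock_iothread();          /* timers expire under the BQL */

    while (all_cpu_threads_idle()) {
        stop_tcg_kick_timer();
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    start_tcg_kick_timer();
    qemu_wait_io_event_common(cpu);

    qemu_mutex_unlock_iothread();
}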
This is stage one of reducing the lock's impact: it drops the
requirement that async work implicitly holds the BQL, so we only take
the lock when we need to sleep on cpu->halt_cond.
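Correspondingly, the generic wait helper becomes the one place a vCPU
thread sleeps with the BQL held. Reconstructed from the hunks below,
with the platform-specific section between the wait loop and the common
tail elided:

static void qemu_wait_io_event(CPUState *cpu)
{
    qemu_mutex_lock_iothread();

    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    /* ... platform-specific handling elided ... */

    qemu_wait_io_event_common(cpu);
    qemu_mutex_unlock_iothread();
}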
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Pavel Dovgalyuk <pavel.dovgaluk@ispras.ru>
---
accel/kvm/kvm-all.c | 1 -
cpus.c | 29 +++++++++++++++++------------
2 files changed, 17 insertions(+), 13 deletions(-)
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 071f4f5..9628512 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -1863,7 +1863,6 @@ int kvm_cpu_exec(CPUState *cpu)
qemu_mutex_unlock_iothread();
cpu_exec_start(cpu);
-
do {
MemTxAttrs attrs;
diff --git a/cpus.c b/cpus.c
index 2cb0af9..577c764 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1141,6 +1141,7 @@ static void qemu_wait_io_event_common(CPUState *cpu)
static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
{
+ qemu_mutex_lock_iothread();
while (all_cpu_threads_idle()) {
stop_tcg_kick_timer();
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
@@ -1149,10 +1150,13 @@ static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
start_tcg_kick_timer();
qemu_wait_io_event_common(cpu);
+ qemu_mutex_unlock_iothread();
}
static void qemu_wait_io_event(CPUState *cpu)
{
+ qemu_mutex_lock_iothread();
+
while (cpu_thread_is_idle(cpu)) {
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
@@ -1164,6 +1168,7 @@ static void qemu_wait_io_event(CPUState *cpu)
}
#endif
qemu_wait_io_event_common(cpu);
+ qemu_mutex_unlock_iothread();
}
static void *qemu_kvm_cpu_thread_fn(void *arg)
@@ -1189,6 +1194,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
/* signal CPU creation */
cpu->created = true;
+ qemu_mutex_unlock_iothread();
+
qemu_cond_signal(&qemu_cpu_cond);
do {
@@ -1204,7 +1211,6 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
qemu_kvm_destroy_vcpu(cpu);
cpu->created = false;
qemu_cond_signal(&qemu_cpu_cond);
- qemu_mutex_unlock_iothread();
return NULL;
}
@@ -1231,10 +1237,10 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
/* signal CPU creation */
cpu->created = true;
+ qemu_mutex_unlock_iothread();
qemu_cond_signal(&qemu_cpu_cond);
while (1) {
- qemu_mutex_unlock_iothread();
do {
int sig;
r = sigwait(&waitset, &sig);
@@ -1243,7 +1249,6 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
perror("sigwait");
exit(1);
}
- qemu_mutex_lock_iothread();
qemu_wait_io_event(cpu);
}
@@ -1333,11 +1338,9 @@ static int tcg_cpu_exec(CPUState *cpu)
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
- qemu_mutex_unlock_iothread();
cpu_exec_start(cpu);
ret = cpu_exec(cpu);
cpu_exec_end(cpu);
- qemu_mutex_lock_iothread();
#ifdef CONFIG_PROFILER
tcg_time += profile_getclock() - ti;
#endif
@@ -1397,6 +1400,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
qemu_wait_io_event_common(cpu);
}
}
+ qemu_mutex_unlock_iothread();
start_tcg_kick_timer();
@@ -1406,6 +1410,8 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
cpu->exit_request = 1;
while (1) {
+ qemu_mutex_lock_iothread();
+
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
qemu_account_warp_timer();
@@ -1414,6 +1420,8 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
*/
handle_icount_deadline();
+ qemu_mutex_unlock_iothread();
+
if (!cpu) {
cpu = first_cpu;
}
@@ -1439,9 +1447,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
cpu_handle_guest_debug(cpu);
break;
} else if (r == EXCP_ATOMIC) {
- qemu_mutex_unlock_iothread();
cpu_exec_step_atomic(cpu);
- qemu_mutex_lock_iothread();
break;
}
} else if (cpu->stop) {
@@ -1482,6 +1488,7 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
current_cpu = cpu;
hax_init_vcpu(cpu);
+ qemu_mutex_unlock_iothread();
qemu_cond_signal(&qemu_cpu_cond);
while (1) {
@@ -1518,8 +1525,9 @@ static void *qemu_hvf_cpu_thread_fn(void *arg)
hvf_init_vcpu(cpu);
- /* signal CPU creation */
cpu->created = true;
+ qemu_mutex_unlock_iothread();
+ /* signal CPU creation */
qemu_cond_signal(&qemu_cpu_cond);
do {
@@ -1535,7 +1543,6 @@ static void *qemu_hvf_cpu_thread_fn(void *arg)
hvf_vcpu_destroy(cpu);
cpu->created = false;
qemu_cond_signal(&qemu_cpu_cond);
- qemu_mutex_unlock_iothread();
return NULL;
}
@@ -1568,6 +1575,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
cpu->created = true;
cpu->can_do_io = 1;
current_cpu = cpu;
+ qemu_mutex_unlock_iothread();
qemu_cond_signal(&qemu_cpu_cond);
/* process any pending work */
@@ -1592,9 +1600,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
g_assert(cpu->halted);
break;
case EXCP_ATOMIC:
- qemu_mutex_unlock_iothread();
cpu_exec_step_atomic(cpu);
- qemu_mutex_lock_iothread();
default:
/* Ignore everything else? */
break;
@@ -1603,7 +1609,6 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
qemu_tcg_destroy_vcpu(cpu);
cpu->created = false;
qemu_cond_signal(&qemu_cpu_cond);
- qemu_mutex_unlock_iothread();
return NULL;
}