From: Pavel Dovgalyuk <Pavel.Dovgaluk@ispras.ru>
To: qemu-devel@nongnu.org
Cc: kwolf@redhat.com, peter.maydell@linaro.org,
boost.lists@gmail.com, quintela@redhat.com, jasowang@redhat.com,
mst@redhat.com, zuban32s@gmail.com,
maria.klimushenkova@ispras.ru, dovgaluk@ispras.ru,
kraxel@redhat.com, pavel.dovgaluk@ispras.ru, pbonzini@redhat.com,
alex.bennee@linaro.org
Subject: [Qemu-devel] [RFC PATCH v4 12/23] cpus: push BQL lock to qemu_*_wait_io_event
Date: Fri, 19 Jan 2018 11:44:09 +0300 [thread overview]
Message-ID: <20180119084409.7100.23132.stgit@pasha-VirtualBox> (raw)
In-Reply-To: <20180119084235.7100.98318.stgit@pasha-VirtualBox>
From: Alex Bennée <alex.bennee@linaro.org>
We only really need to grab the lock for initial setup (so we don't
race with the thread-spawning thread). After that we can drop the lock
for the whole main loop and only grab it for waiting for IO events.
There is a slight wrinkle for the round-robin TCG thread: it also
expires timers, which needs to be done while holding the BQL because
the timers belong to the main loop.
This is stage one of reducing the lock impact as we can drop the
requirement of implicit BQL for async work and only grab the lock when
we need to sleep on the cpu->halt_cond.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Pavel Dovgalyuk <pavel.dovgaluk@ispras.ru>
---
accel/kvm/kvm-all.c | 4 ----
cpus.c | 22 +++++++++++++++-------
dtc | 2 +-
target/i386/hax-all.c | 2 --
4 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index f290f48..8d1d2c4 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -1857,9 +1857,7 @@ int kvm_cpu_exec(CPUState *cpu)
return EXCP_HLT;
}
- qemu_mutex_unlock_iothread();
cpu_exec_start(cpu);
-
do {
MemTxAttrs attrs;
@@ -1989,8 +1987,6 @@ int kvm_cpu_exec(CPUState *cpu)
} while (ret == 0);
cpu_exec_end(cpu);
- qemu_mutex_lock_iothread();
-
if (ret < 0) {
cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
vm_stop(RUN_STATE_INTERNAL_ERROR);
diff --git a/cpus.c b/cpus.c
index 7b6ce74..ca86d9f 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1150,10 +1150,14 @@ static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
start_tcg_kick_timer();
qemu_wait_io_event_common(cpu);
+
+ qemu_mutex_unlock_iothread();
}
static void qemu_wait_io_event(CPUState *cpu)
{
+ qemu_mutex_lock_iothread();
+
while (cpu_thread_is_idle(cpu)) {
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
@@ -1190,6 +1194,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
/* signal CPU creation */
cpu->created = true;
+ qemu_mutex_unlock_iothread();
+
qemu_cond_signal(&qemu_cpu_cond);
do {
@@ -1232,10 +1238,10 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
/* signal CPU creation */
cpu->created = true;
+ qemu_mutex_unlock_iothread();
qemu_cond_signal(&qemu_cpu_cond);
while (1) {
- qemu_mutex_unlock_iothread();
do {
int sig;
r = sigwait(&waitset, &sig);
@@ -1246,6 +1252,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
}
qemu_mutex_lock_iothread();
qemu_wait_io_event(cpu);
+ qemu_mutex_unlock_iothread();
}
return NULL;
@@ -1334,11 +1341,9 @@ static int tcg_cpu_exec(CPUState *cpu)
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
- qemu_mutex_unlock_iothread();
cpu_exec_start(cpu);
ret = cpu_exec(cpu);
cpu_exec_end(cpu);
- qemu_mutex_lock_iothread();
#ifdef CONFIG_PROFILER
tcg_time += profile_getclock() - ti;
#endif
@@ -1398,6 +1403,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
qemu_wait_io_event_common(cpu);
}
}
+ qemu_mutex_unlock_iothread();
start_tcg_kick_timer();
@@ -1407,6 +1413,8 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
cpu->exit_request = 1;
while (1) {
+ qemu_mutex_lock_iothread();
+
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
qemu_account_warp_timer();
@@ -1415,6 +1423,8 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
*/
handle_icount_deadline();
+ qemu_mutex_unlock_iothread();
+
if (!cpu) {
cpu = first_cpu;
}
@@ -1440,9 +1450,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
cpu_handle_guest_debug(cpu);
break;
} else if (r == EXCP_ATOMIC) {
- qemu_mutex_unlock_iothread();
cpu_exec_step_atomic(cpu);
- qemu_mutex_lock_iothread();
break;
}
} else if (cpu->stop) {
@@ -1483,6 +1491,7 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
current_cpu = cpu;
hax_init_vcpu(cpu);
+ qemu_mutex_unlock_iothread();
qemu_cond_signal(&qemu_cpu_cond);
while (1) {
@@ -1569,6 +1578,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
cpu->created = true;
cpu->can_do_io = 1;
current_cpu = cpu;
+ qemu_mutex_unlock_iothread();
qemu_cond_signal(&qemu_cpu_cond);
/* process any pending work */
@@ -1593,9 +1603,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
g_assert(cpu->halted);
break;
case EXCP_ATOMIC:
- qemu_mutex_unlock_iothread();
cpu_exec_step_atomic(cpu);
- qemu_mutex_lock_iothread();
default:
/* Ignore everything else? */
break;
diff --git a/dtc b/dtc
index e543880..558cd81 160000
--- a/dtc
+++ b/dtc
@@ -1 +1 @@
-Subproject commit e54388015af1fb4bf04d0bca99caba1074d9cc42
+Subproject commit 558cd81bdd432769b59bff01240c44f82cfb1a9d
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index 934ec4a..54b1fc7 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -513,11 +513,9 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
hax_vcpu_interrupt(env);
- qemu_mutex_unlock_iothread();
cpu_exec_start(cpu);
hax_ret = hax_vcpu_run(vcpu);
cpu_exec_end(cpu);
- qemu_mutex_lock_iothread();
/* Simply continue the vcpu_run if system call interrupted */
if (hax_ret == -EINTR || hax_ret == -EAGAIN) {
next prev parent reply other threads:[~2018-01-19 8:44 UTC|newest]
Thread overview: 37+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-01-19 8:42 [Qemu-devel] [RFC PATCH v4 00/23] replay additions Pavel Dovgalyuk
2018-01-19 8:42 ` [Qemu-devel] [RFC PATCH v4 01/23] This patch adds a condition before overwriting exception_index fields Pavel Dovgalyuk
2018-01-19 8:47 ` Paolo Bonzini
2018-01-19 8:42 ` [Qemu-devel] [RFC PATCH v4 02/23] block: implement bdrv_snapshot_goto for blkreplay Pavel Dovgalyuk
2018-01-19 8:42 ` [Qemu-devel] [RFC PATCH v4 03/23] blkreplay: create temporary overlay for underlaying devices Pavel Dovgalyuk
2018-01-19 8:43 ` [Qemu-devel] [RFC PATCH v4 04/23] replay: disable default snapshot for record/replay Pavel Dovgalyuk
2018-01-19 8:43 ` [Qemu-devel] [RFC PATCH v4 05/23] replay: fix processing async events Pavel Dovgalyuk
2018-01-19 8:43 ` [Qemu-devel] [RFC PATCH v4 06/23] replay: fixed replay_enable_events Pavel Dovgalyuk
2018-01-19 8:43 ` [Qemu-devel] [RFC PATCH v4 07/23] replay: fix save/load vm for non-empty queue Pavel Dovgalyuk
2018-01-19 8:43 ` [Qemu-devel] [RFC PATCH v4 08/23] replay: added replay log format description Pavel Dovgalyuk
2018-01-19 8:43 ` [Qemu-devel] [RFC PATCH v4 09/23] replay: make safe vmstop at record/replay Pavel Dovgalyuk
2018-01-19 8:43 ` [Qemu-devel] [RFC PATCH v4 10/23] replay: save prior value of the host clock Pavel Dovgalyuk
2018-01-19 8:43 ` [Qemu-devel] [RFC PATCH v4 11/23] target/arm/arm-powertctl: drop BQL assertions Pavel Dovgalyuk
2018-01-19 8:44 ` Pavel Dovgalyuk [this message]
2018-01-19 8:55 ` [Qemu-devel] [RFC PATCH v4 12/23] cpus: push BQL lock to qemu_*_wait_io_event Paolo Bonzini
2018-01-19 11:52 ` Pavel Dovgalyuk
2018-01-19 8:44 ` [Qemu-devel] [RFC PATCH v4 13/23] cpus: only take BQL for sleeping threads Pavel Dovgalyuk
2018-01-19 8:59 ` Paolo Bonzini
2018-01-19 12:05 ` Pavel Dovgalyuk
2018-01-19 12:19 ` Paolo Bonzini
2018-01-19 12:25 ` Pavel Dovgalyuk
2018-01-19 12:26 ` Paolo Bonzini
2018-01-19 12:36 ` Pavel Dovgalyuk
2018-01-19 13:20 ` Pavel Dovgalyuk
2018-01-19 13:33 ` Paolo Bonzini
2018-01-22 6:47 ` Pavel Dovgalyuk
2018-01-19 8:44 ` [Qemu-devel] [RFC PATCH v4 14/23] replay/replay.c: bump REPLAY_VERSION again Pavel Dovgalyuk
2018-01-19 8:44 ` [Qemu-devel] [RFC PATCH v4 15/23] replay/replay-internal.c: track holding of replay_lock Pavel Dovgalyuk
2018-01-19 8:44 ` [Qemu-devel] [RFC PATCH v4 16/23] replay: make locking visible outside replay code Pavel Dovgalyuk
2018-01-19 8:44 ` [Qemu-devel] [RFC PATCH v4 17/23] replay: push replay_mutex_lock up the call tree Pavel Dovgalyuk
2018-01-19 8:44 ` [Qemu-devel] [RFC PATCH v4 18/23] replay: don't destroy mutex at exit Pavel Dovgalyuk
2018-01-19 8:44 ` [Qemu-devel] [RFC PATCH v4 19/23] replay: check return values of fwrite Pavel Dovgalyuk
2018-01-19 8:45 ` [Qemu-devel] [RFC PATCH v4 20/23] replay: avoid recursive call of checkpoints Pavel Dovgalyuk
2018-01-19 8:45 ` [Qemu-devel] [RFC PATCH v4 21/23] scripts/replay-dump.py: replay log dumper Pavel Dovgalyuk
2018-01-19 8:45 ` [Qemu-devel] [RFC PATCH v4 22/23] replay: don't process async events when warping the clock Pavel Dovgalyuk
2018-01-19 8:45 ` [Qemu-devel] [RFC PATCH v4 23/23] replay: save vmstate of the asynchronous events Pavel Dovgalyuk
2018-01-19 9:25 ` [Qemu-devel] [RFC PATCH v4 00/23] replay additions no-reply
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180119084409.7100.23132.stgit@pasha-VirtualBox \
--to=pavel.dovgaluk@ispras.ru \
--cc=alex.bennee@linaro.org \
--cc=boost.lists@gmail.com \
--cc=dovgaluk@ispras.ru \
--cc=jasowang@redhat.com \
--cc=kraxel@redhat.com \
--cc=kwolf@redhat.com \
--cc=maria.klimushenkova@ispras.ru \
--cc=mst@redhat.com \
--cc=pbonzini@redhat.com \
--cc=peter.maydell@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=zuban32s@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).