From: Robert Foley <robert.foley@linaro.org>
To: qemu-devel@nongnu.org
Cc: richard.henderson@linaro.org, cota@braap.org,
alex.bennee@linaro.org, robert.foley@linaro.org,
peter.puhov@linaro.org
Subject: [PATCH v9 07/74] cpu: make per-CPU locks an alias of the BQL in TCG rr mode
Date: Thu, 21 May 2020 12:39:04 -0400 [thread overview]
Message-ID: <20200521164011.638-8-robert.foley@linaro.org> (raw)
In-Reply-To: <20200521164011.638-1-robert.foley@linaro.org>
From: "Emilio G. Cota" <cota@braap.org>
Before we can switch from the BQL to per-CPU locks in
the CPU loop, we have to accommodate the fact that TCG
rr mode (i.e. !MTTCG) cannot work with separate per-vCPU
locks. That would lead to deadlock since we need a single
lock/condvar pair on which to wait for events that affect
any vCPU, e.g. in qemu_tcg_rr_wait_io_event.
At the same time, we are moving towards an interface where
the BQL and CPU locks are independent, and the only requirement
is that the locking order is respected, i.e. the BQL is
acquired first if both locks have to be held at the same time.
In this patch we make the BQL a recursive lock under the hood.
This allows us to (1) keep the BQL and CPU locks interfaces
separate, and (2) use a single lock for all vCPUs in TCG rr mode.
Note that the BQL's API (qemu_mutex_lock/unlock_iothread) remains
non-recursive.
Added cpu_mutex_destroy, called from cpu_common_finalize,
to avoid destroying qemu_global_mutex when the cpu mutex is destroyed.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
[RF: Fixed destroy issue, added cpu_mutex_destroy.]
Signed-off-by: Robert Foley <robert.foley@linaro.org>
---
cpus-common.c | 2 +-
cpus.c | 101 ++++++++++++++++++++++++++++++++++++++----
hw/core/cpu.c | 5 ++-
include/hw/core/cpu.h | 8 +++-
stubs/cpu-lock.c | 7 +++
5 files changed, 110 insertions(+), 13 deletions(-)
diff --git a/cpus-common.c b/cpus-common.c
index e2f609c8e2..d418eb5aef 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -161,7 +161,7 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
while (!atomic_mb_read(&wi.done)) {
CPUState *self_cpu = current_cpu;
- qemu_cond_wait(&cpu->cond, &cpu->lock);
+ qemu_cond_wait(&cpu->cond, cpu->lock);
current_cpu = self_cpu;
}
cpu_mutex_unlock(cpu);
diff --git a/cpus.c b/cpus.c
index 935fff6e4e..9cc5f4767e 100644
--- a/cpus.c
+++ b/cpus.c
@@ -92,6 +92,12 @@ static unsigned int throttle_percentage;
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000
+static inline bool qemu_is_tcg_rr(void)
+{
+ /* in `make check-qtest', "use_icount && !tcg_enabled()" might be true */
+ return use_icount || (tcg_enabled() && !qemu_tcg_mttcg_enabled());
+}
+
/*
* Note: we index the bitmap with cpu->cpu_index + 1 so that the logic
* also works during early CPU initialization, when cpu->cpu_index is set to
@@ -104,25 +110,75 @@ bool no_cpu_mutex_locked(void)
return bitmap_empty(cpu_lock_bitmap, CPU_LOCK_BITMAP_SIZE);
}
-void cpu_mutex_lock_impl(CPUState *cpu, const char *file, int line)
+static __thread bool iothread_locked;
+/*
+ * In TCG rr mode, we make the BQL a recursive mutex, so that we can use it for
+ * all vCPUs while keeping the interface as if the locks were per-CPU.
+ *
+ * The fact that the BQL is implemented recursively is invisible to BQL users;
+ * the mutex API we export (qemu_mutex_lock_iothread() etc.) is non-recursive.
+ *
+ * Locking order: the BQL is always acquired before CPU locks.
+ */
+static __thread int iothread_lock_count;
+
+static void rr_cpu_mutex_lock(void)
{
-/* coverity gets confused by the indirect function call */
+ if (iothread_lock_count++ == 0) {
+ /*
+ * Circumvent qemu_mutex_lock_iothread()'s state keeping by
+ * acquiring the BQL directly.
+ */
+ qemu_mutex_lock(&qemu_global_mutex);
+ }
+}
+
+static void rr_cpu_mutex_unlock(void)
+{
+ g_assert(iothread_lock_count > 0);
+ if (--iothread_lock_count == 0) {
+ /*
+ * Circumvent qemu_mutex_unlock_iothread()'s state keeping by
+ * releasing the BQL directly.
+ */
+ qemu_mutex_unlock(&qemu_global_mutex);
+ }
+}
+
+static void do_cpu_mutex_lock(CPUState *cpu, const char *file, int line)
+{
+ /* coverity gets confused by the indirect function call */
#ifdef __COVERITY__
- qemu_mutex_lock_impl(&cpu->lock, file, line);
+ qemu_mutex_lock_impl(cpu->lock, file, line);
#else
QemuMutexLockFunc f = atomic_read(&qemu_mutex_lock_func);
+ f(cpu->lock, file, line);
+#endif
+}
+
+void cpu_mutex_lock_impl(CPUState *cpu, const char *file, int line)
+{
g_assert(!cpu_mutex_locked(cpu));
set_bit(cpu->cpu_index + 1, cpu_lock_bitmap);
- f(&cpu->lock, file, line);
-#endif
+
+ if (qemu_is_tcg_rr()) {
+ rr_cpu_mutex_lock();
+ } else {
+ do_cpu_mutex_lock(cpu, file, line);
+ }
}
void cpu_mutex_unlock_impl(CPUState *cpu, const char *file, int line)
{
g_assert(cpu_mutex_locked(cpu));
- qemu_mutex_unlock_impl(&cpu->lock, file, line);
clear_bit(cpu->cpu_index + 1, cpu_lock_bitmap);
+
+ if (qemu_is_tcg_rr()) {
+ rr_cpu_mutex_unlock();
+ return;
+ }
+ qemu_mutex_unlock_impl(cpu->lock, file, line);
}
bool cpu_mutex_locked(const CPUState *cpu)
@@ -130,6 +186,20 @@ bool cpu_mutex_locked(const CPUState *cpu)
return test_bit(cpu->cpu_index + 1, cpu_lock_bitmap);
}
+void cpu_mutex_destroy(CPUState *cpu)
+{
+ /*
+ * In TCG RR, cpu->lock is the BQL under the hood. In all other modes,
+ * cpu->lock is a standalone per-CPU lock.
+ */
+ if (qemu_is_tcg_rr()) {
+ cpu->lock = NULL;
+ } else {
+ qemu_mutex_destroy(cpu->lock);
+ g_free(cpu->lock);
+ }
+}
+
bool cpu_is_stopped(CPUState *cpu)
{
return cpu->stopped || !runstate_is_running();
@@ -1863,8 +1933,6 @@ bool qemu_in_vcpu_thread(void)
return current_cpu && qemu_cpu_is_self(current_cpu);
}
-static __thread bool iothread_locked = false;
-
bool qemu_mutex_iothread_locked(void)
{
return iothread_locked;
@@ -1883,6 +1951,8 @@ void qemu_mutex_lock_iothread_impl(const char *file, int line)
g_assert(!qemu_mutex_iothread_locked());
bql_lock(&qemu_global_mutex, file, line);
+ g_assert(iothread_lock_count == 0);
+ iothread_lock_count++;
iothread_locked = true;
}
@@ -1890,7 +1960,10 @@ void qemu_mutex_unlock_iothread(void)
{
g_assert(qemu_mutex_iothread_locked());
iothread_locked = false;
- qemu_mutex_unlock(&qemu_global_mutex);
+ g_assert(iothread_lock_count > 0);
+ if (--iothread_lock_count == 0) {
+ qemu_mutex_unlock(&qemu_global_mutex);
+ }
}
void qemu_cond_wait_iothread(QemuCond *cond)
@@ -2126,6 +2199,16 @@ void qemu_init_vcpu(CPUState *cpu)
cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
}
+ /*
+ * In TCG RR, cpu->lock is the BQL under the hood. In all other modes,
+ * cpu->lock is a standalone per-CPU lock.
+ */
+ if (qemu_is_tcg_rr()) {
+ qemu_mutex_destroy(cpu->lock);
+ g_free(cpu->lock);
+ cpu->lock = &qemu_global_mutex;
+ }
+
if (kvm_enabled()) {
qemu_kvm_start_vcpu(cpu);
} else if (hax_enabled()) {
diff --git a/hw/core/cpu.c b/hw/core/cpu.c
index 9b9d4296f9..620feed3e9 100644
--- a/hw/core/cpu.c
+++ b/hw/core/cpu.c
@@ -367,7 +367,8 @@ static void cpu_common_initfn(Object *obj)
cpu->nr_cores = 1;
cpu->nr_threads = 1;
- qemu_mutex_init(&cpu->lock);
+ cpu->lock = g_new(QemuMutex, 1);
+ qemu_mutex_init(cpu->lock);
qemu_cond_init(&cpu->cond);
QSIMPLEQ_INIT(&cpu->work_list);
QTAILQ_INIT(&cpu->breakpoints);
@@ -380,7 +381,7 @@ static void cpu_common_finalize(Object *obj)
{
CPUState *cpu = CPU(obj);
- qemu_mutex_destroy(&cpu->lock);
+ cpu_mutex_destroy(cpu);
}
static int64_t cpu_common_get_arch_id(CPUState *cpu)
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index fe79777502..2959ed1b49 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -377,7 +377,7 @@ struct CPUState {
uint64_t random_seed;
sigjmp_buf jmp_env;
- QemuMutex lock;
+ QemuMutex *lock;
/* fields below protected by @lock */
QemuCond cond;
QSIMPLEQ_HEAD(, qemu_work_item) work_list;
@@ -485,6 +485,12 @@ void cpu_mutex_unlock_impl(CPUState *cpu, const char *file, int line);
*/
bool cpu_mutex_locked(const CPUState *cpu);
+/**
+ * cpu_mutex_destroy - Handle how to destroy this CPU's mutex
+ * @cpu: the CPU whose mutex to destroy
+ */
+void cpu_mutex_destroy(CPUState *cpu);
+
/**
* no_cpu_mutex_locked - check whether any CPU mutex is held
*
diff --git a/stubs/cpu-lock.c b/stubs/cpu-lock.c
index 1981a00fb3..97a81f447d 100644
--- a/stubs/cpu-lock.c
+++ b/stubs/cpu-lock.c
@@ -18,3 +18,10 @@ bool no_cpu_mutex_locked(void)
{
return true;
}
+
+void cpu_mutex_destroy(CPUState *cpu)
+{
+ qemu_mutex_destroy(cpu->lock);
+ g_free(cpu->lock);
+ cpu->lock = NULL;
+}
--
2.17.1
next prev parent reply other threads:[~2020-05-21 16:46 UTC|newest]
Thread overview: 79+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-05-21 16:38 [PATCH v9 00/74] per-CPU locks Robert Foley
2020-05-21 16:38 ` [PATCH v9 01/74] cpu: convert queued work to a QSIMPLEQ Robert Foley
2020-05-21 16:38 ` [PATCH v9 02/74] cpu: rename cpu->work_mutex to cpu->lock Robert Foley
2020-05-21 16:39 ` [PATCH v9 03/74] cpu: introduce cpu_mutex_lock/unlock Robert Foley
2020-05-21 16:39 ` [PATCH v9 04/74] cpu: make qemu_work_cond per-cpu Robert Foley
2020-05-21 16:39 ` [PATCH v9 05/74] cpu: move run_on_cpu to cpus-common Robert Foley
2020-05-21 16:39 ` [PATCH v9 06/74] cpu: introduce process_queued_cpu_work_locked Robert Foley
2020-05-21 16:39 ` Robert Foley [this message]
2020-05-21 16:39 ` [PATCH v9 08/74] tcg-runtime: define helper_cpu_halted_set Robert Foley
2020-05-21 16:39 ` [PATCH v9 09/74] ppc: convert to helper_cpu_halted_set Robert Foley
2020-05-21 16:39 ` [PATCH v9 10/74] cris: " Robert Foley
2020-05-21 16:45 ` Edgar E. Iglesias
2020-05-21 16:39 ` [PATCH v9 11/74] hppa: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 12/74] m68k: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 13/74] alpha: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 14/74] microblaze: " Robert Foley
2020-05-21 16:45 ` Edgar E. Iglesias
2020-05-21 16:39 ` [PATCH v9 15/74] cpu: define cpu_halted helpers Robert Foley
2020-05-21 16:39 ` [PATCH v9 16/74] tcg-runtime: convert to cpu_halted_set Robert Foley
2020-05-21 16:39 ` [PATCH v9 17/74] hw/semihosting: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 18/74] arm: convert to cpu_halted Robert Foley
2020-05-21 16:39 ` [PATCH v9 19/74] ppc: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 20/74] sh4: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 21/74] i386: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 22/74] lm32: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 23/74] m68k: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 24/74] mips: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 25/74] riscv: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 26/74] s390x: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 27/74] sparc: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 28/74] xtensa: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 29/74] gdbstub: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 30/74] openrisc: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 31/74] cpu-exec: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 32/74] cpu: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 33/74] cpu: define cpu_interrupt_request helpers Robert Foley
2020-05-21 16:39 ` [PATCH v9 34/74] ppc: use cpu_reset_interrupt Robert Foley
2020-05-21 16:39 ` [PATCH v9 35/74] exec: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 36/74] i386: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 37/74] s390x: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 38/74] openrisc: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 39/74] arm: convert to cpu_interrupt_request Robert Foley
2020-05-21 16:39 ` [PATCH v9 40/74] i386: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 41/74] i386/kvm: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 42/74] i386/hax-all: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 43/74] i386/whpx-all: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 44/74] i386/hvf: convert to cpu_request_interrupt Robert Foley
2020-05-21 16:39 ` [PATCH v9 45/74] ppc: convert to cpu_interrupt_request Robert Foley
2020-05-21 16:39 ` [PATCH v9 46/74] sh4: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 47/74] cris: " Robert Foley
2020-05-21 16:45 ` Edgar E. Iglesias
2020-05-21 16:39 ` [PATCH v9 48/74] hppa: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 49/74] lm32: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 50/74] m68k: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 51/74] mips: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 52/74] nios: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 53/74] s390x: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 54/74] alpha: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 55/74] moxie: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 56/74] sparc: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 57/74] openrisc: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 58/74] unicore32: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 59/74] microblaze: " Robert Foley
2020-05-21 16:46 ` Edgar E. Iglesias
2020-05-21 16:39 ` [PATCH v9 60/74] accel/tcg: " Robert Foley
2020-05-21 16:39 ` [PATCH v9 61/74] cpu: convert to interrupt_request Robert Foley
2020-05-21 16:39 ` [PATCH v9 62/74] cpu: call .cpu_has_work with the CPU lock held Robert Foley
2020-05-21 16:40 ` [PATCH v9 63/74] cpu: introduce cpu_has_work_with_iothread_lock Robert Foley
2020-05-21 16:40 ` [PATCH v9 64/74] ppc: convert to cpu_has_work_with_iothread_lock Robert Foley
2020-05-21 16:40 ` [PATCH v9 65/74] mips: " Robert Foley
2020-05-21 16:40 ` [PATCH v9 66/74] s390x: " Robert Foley
2020-05-21 16:40 ` [PATCH v9 67/74] riscv: " Robert Foley
2020-05-21 16:40 ` [PATCH v9 68/74] sparc: " Robert Foley
2020-05-21 16:40 ` [PATCH v9 69/74] xtensa: " Robert Foley
2020-05-21 16:40 ` [PATCH v9 70/74] cpu: rename all_cpu_threads_idle to qemu_tcg_rr_all_cpu_threads_idle Robert Foley
2020-05-21 16:40 ` [PATCH v9 71/74] cpu: protect CPU state with cpu->lock instead of the BQL Robert Foley
2020-05-21 16:40 ` [PATCH v9 72/74] cpus-common: release BQL earlier in run_on_cpu Robert Foley
2020-05-21 16:40 ` [PATCH v9 73/74] cpu: add async_run_on_cpu_no_bql Robert Foley
2020-05-21 16:40 ` [PATCH v9 74/74] cputlb: queue async flush jobs without the BQL Robert Foley
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200521164011.638-8-robert.foley@linaro.org \
--to=robert.foley@linaro.org \
--cc=alex.bennee@linaro.org \
--cc=cota@braap.org \
--cc=peter.puhov@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=richard.henderson@linaro.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).