From: "Alex Bennée" <alex.bennee@linaro.org>
To: "Emilio G. Cota" <cota@braap.org>
Cc: qemu-devel@nongnu.org, Paolo Bonzini <pbonzini@redhat.com>,
Richard Henderson <richard.henderson@linaro.org>
Subject: Re: [Qemu-devel] [PATCH v6 07/73] cpu: make per-CPU locks an alias of the BQL in TCG rr mode
Date: Thu, 07 Feb 2019 12:40:02 +0000
Message-ID: <87va1vn55p.fsf@zen.linaroharston>
In-Reply-To: <20190130004811.27372-8-cota@braap.org>

Emilio G. Cota <cota@braap.org> writes:

> Before we can switch from the BQL to per-CPU locks in
> the CPU loop, we have to accommodate the fact that TCG
> rr mode (i.e. !MTTCG) cannot work with separate per-vCPU
> locks. That would lead to deadlock since we need a single
> lock/condvar pair on which to wait for events that affect
> any vCPU, e.g. in qemu_tcg_rr_wait_io_event.
>
> At the same time, we are moving towards an interface where
> the BQL and CPU locks are independent, and the only requirement
> is that the locking order is respected, i.e. the BQL is
> acquired first if both locks have to be held at the same time.
>
> In this patch we make the BQL a recursive lock under the hood.
> This allows us to (1) keep the BQL and CPU locks interfaces
> separate, and (2) use a single lock for all vCPUs in TCG rr mode.
>
> Note that the BQL's API (qemu_mutex_lock/unlock_iothread) remains
> non-recursive.
>
> Signed-off-by: Emilio G. Cota <cota@braap.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
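
One note for anyone following the series rather than a request for changes:
my reading of the caller-visible contract is that the two lock interfaces
stay independent and only the ordering matters. A minimal sketch (mine, not
code from the patch), assuming a caller that needs both locks:

  qemu_mutex_lock_iothread();     /* BQL first                          */
  cpu_mutex_lock(cpu);            /* then the per-CPU lock              */
  /* ... touch state protected by cpu->lock ... */
  cpu_mutex_unlock(cpu);
  qemu_mutex_unlock_iothread();

Under MTTCG or KVM those are two distinct mutexes; the TCG rr case is
handled by the recursion counter introduced below.
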
> ---
> include/qom/cpu.h | 2 +-
> cpus-common.c | 2 +-
> cpus.c | 90 +++++++++++++++++++++++++++++++++++++++++------
> qom/cpu.c | 3 +-
> stubs/cpu-lock.c | 6 ++--
> 5 files changed, 86 insertions(+), 17 deletions(-)
>
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index fe389037c5..8b85a036cf 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -363,7 +363,7 @@ struct CPUState {
> int64_t icount_extra;
> sigjmp_buf jmp_env;
>
> - QemuMutex lock;
> + QemuMutex *lock;
> /* fields below protected by @lock */
> QemuCond cond;
> QSIMPLEQ_HEAD(, qemu_work_item) work_list;
> diff --git a/cpus-common.c b/cpus-common.c
> index 99662bfa87..62e282bff1 100644
> --- a/cpus-common.c
> +++ b/cpus-common.c
> @@ -171,7 +171,7 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
> while (!atomic_mb_read(&wi.done)) {
> CPUState *self_cpu = current_cpu;
>
> - qemu_cond_wait(&cpu->cond, &cpu->lock);
> + qemu_cond_wait(&cpu->cond, cpu->lock);
> current_cpu = self_cpu;
> }
> cpu_mutex_unlock(cpu);
> diff --git a/cpus.c b/cpus.c
> index 755e4addab..c4fa3cc876 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -83,6 +83,12 @@ static unsigned int throttle_percentage;
> #define CPU_THROTTLE_PCT_MAX 99
> #define CPU_THROTTLE_TIMESLICE_NS 10000000
>
> +static inline bool qemu_is_tcg_rr(void)
> +{
> + /* in `make check-qtest', "use_icount && !tcg_enabled()" might be true */
> + return use_icount || (tcg_enabled() && !qemu_tcg_mttcg_enabled());
> +}
> +
> /* XXX: is this really the max number of CPUs? */
> #define CPU_LOCK_BITMAP_SIZE 2048
>
> @@ -98,25 +104,76 @@ bool no_cpu_mutex_locked(void)
> return bitmap_empty(cpu_lock_bitmap, CPU_LOCK_BITMAP_SIZE);
> }
>
> -void cpu_mutex_lock_impl(CPUState *cpu, const char *file, int line)
> +static QemuMutex qemu_global_mutex;
> +static __thread bool iothread_locked;
> +/*
> + * In TCG rr mode, we make the BQL a recursive mutex, so that we can use it for
> + * all vCPUs while keeping the interface as if the locks were per-CPU.
> + *
> + * The fact that the BQL is implemented recursively is invisible to BQL users;
> + * the mutex API we export (qemu_mutex_lock_iothread() etc.) is non-recursive.
> + *
> + * Locking order: the BQL is always acquired before CPU locks.
> + */
> +static __thread int iothread_lock_count;
> +
> +static void rr_cpu_mutex_lock(void)
> +{
> + if (iothread_lock_count++ == 0) {
> + /*
> + * Circumvent qemu_mutex_lock_iothread()'s state keeping by
> + * acquiring the BQL directly.
> + */
> + qemu_mutex_lock(&qemu_global_mutex);
> + }
> +}
> +
> +static void rr_cpu_mutex_unlock(void)
> +{
> + g_assert(iothread_lock_count > 0);
> + if (--iothread_lock_count == 0) {
> + /*
> + * Circumvent qemu_mutex_unlock_iothread()'s state keeping by
> + * releasing the BQL directly.
> + */
> + qemu_mutex_unlock(&qemu_global_mutex);
> + }
> +}
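
Just to spell out how the counter composes with the existing BQL API in rr
mode, a hypothetical trace of iothread_lock_count on one vCPU thread (my
sketch, not code from the tree):

  qemu_mutex_lock_iothread();    /* count 0 -> 1, qemu_global_mutex taken    */
  cpu_mutex_lock(cpu);           /* count 1 -> 2, no second acquisition      */
  cpu_mutex_unlock(cpu);         /* count 2 -> 1, BQL still held             */
  qemu_mutex_unlock_iothread();  /* count 1 -> 0, qemu_global_mutex released */

so a thread already holding the BQL can go through the per-CPU lock API
without deadlocking, while the exported BQL API itself stays non-recursive.
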
> +
> +static void do_cpu_mutex_lock(CPUState *cpu, const char *file, int line)
> {
> -/* coverity gets confused by the indirect function call */
> + /* coverity gets confused by the indirect function call */
> #ifdef __COVERITY__
> - qemu_mutex_lock_impl(&cpu->lock, file, line);
> + qemu_mutex_lock_impl(cpu->lock, file, line);
> #else
> QemuMutexLockFunc f = atomic_read(&qemu_mutex_lock_func);
>
> + f(cpu->lock, file, line);
> +#endif
> +}
> +
> +void cpu_mutex_lock_impl(CPUState *cpu, const char *file, int line)
> +{
> g_assert(!cpu_mutex_locked(cpu));
> set_bit(cpu->cpu_index + 1, cpu_lock_bitmap);
> - f(&cpu->lock, file, line);
> -#endif
> +
> + if (qemu_is_tcg_rr()) {
> + rr_cpu_mutex_lock();
> + } else {
> + do_cpu_mutex_lock(cpu, file, line);
> + }
> }
>
> void cpu_mutex_unlock_impl(CPUState *cpu, const char *file, int line)
> {
> g_assert(cpu_mutex_locked(cpu));
> - qemu_mutex_unlock_impl(&cpu->lock, file, line);
> clear_bit(cpu->cpu_index + 1, cpu_lock_bitmap);
> +
> + if (qemu_is_tcg_rr()) {
> + rr_cpu_mutex_unlock();
> + return;
> + }
> + qemu_mutex_unlock_impl(cpu->lock, file, line);
> }
>
> bool cpu_mutex_locked(const CPUState *cpu)
> @@ -1215,8 +1272,6 @@ static void qemu_init_sigbus(void)
> }
> #endif /* !CONFIG_LINUX */
>
> -static QemuMutex qemu_global_mutex;
> -
> static QemuThread io_thread;
>
> /* cpu creation */
> @@ -1876,8 +1931,6 @@ bool qemu_in_vcpu_thread(void)
> return current_cpu && qemu_cpu_is_self(current_cpu);
> }
>
> -static __thread bool iothread_locked = false;
> -
> bool qemu_mutex_iothread_locked(void)
> {
> return iothread_locked;
> @@ -1896,6 +1949,8 @@ void qemu_mutex_lock_iothread_impl(const char *file, int line)
>
> g_assert(!qemu_mutex_iothread_locked());
> bql_lock(&qemu_global_mutex, file, line);
> + g_assert(iothread_lock_count == 0);
> + iothread_lock_count++;
> iothread_locked = true;
> }
>
> @@ -1903,7 +1958,10 @@ void qemu_mutex_unlock_iothread(void)
> {
> g_assert(qemu_mutex_iothread_locked());
> iothread_locked = false;
> - qemu_mutex_unlock(&qemu_global_mutex);
> + g_assert(iothread_lock_count > 0);
> + if (--iothread_lock_count == 0) {
> + qemu_mutex_unlock(&qemu_global_mutex);
> + }
> }
>
> static bool all_vcpus_paused(void)
> @@ -2127,6 +2185,16 @@ void qemu_init_vcpu(CPUState *cpu)
> cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
> }
>
> + /*
> + * In TCG RR, cpu->lock is the BQL under the hood. In all other modes,
> + * cpu->lock is a standalone per-CPU lock.
> + */
> + if (qemu_is_tcg_rr()) {
> + qemu_mutex_destroy(cpu->lock);
> + g_free(cpu->lock);
> + cpu->lock = &qemu_global_mutex;
> + }
> +
> if (kvm_enabled()) {
> qemu_kvm_start_vcpu(cpu);
> } else if (hax_enabled()) {
> diff --git a/qom/cpu.c b/qom/cpu.c
> index be8393e589..2c05aa1bca 100644
> --- a/qom/cpu.c
> +++ b/qom/cpu.c
> @@ -371,7 +371,8 @@ static void cpu_common_initfn(Object *obj)
> cpu->nr_cores = 1;
> cpu->nr_threads = 1;
>
> - qemu_mutex_init(&cpu->lock);
> + cpu->lock = g_new(QemuMutex, 1);
> + qemu_mutex_init(cpu->lock);
> qemu_cond_init(&cpu->cond);
> QSIMPLEQ_INIT(&cpu->work_list);
> QTAILQ_INIT(&cpu->breakpoints);
> diff --git a/stubs/cpu-lock.c b/stubs/cpu-lock.c
> index 3f07d3a28b..7406a66d97 100644
> --- a/stubs/cpu-lock.c
> +++ b/stubs/cpu-lock.c
> @@ -5,16 +5,16 @@ void cpu_mutex_lock_impl(CPUState *cpu, const char *file, int line)
> {
> /* coverity gets confused by the indirect function call */
> #ifdef __COVERITY__
> - qemu_mutex_lock_impl(&cpu->lock, file, line);
> + qemu_mutex_lock_impl(cpu->lock, file, line);
> #else
> QemuMutexLockFunc f = atomic_read(&qemu_mutex_lock_func);
> - f(&cpu->lock, file, line);
> + f(cpu->lock, file, line);
> #endif
> }
>
> void cpu_mutex_unlock_impl(CPUState *cpu, const char *file, int line)
> {
> - qemu_mutex_unlock_impl(&cpu->lock, file, line);
> + qemu_mutex_unlock_impl(cpu->lock, file, line);
> }
>
> bool cpu_mutex_locked(const CPUState *cpu)
--
Alex Bennée