From: "Alex Bennée" <alex.bennee@linaro.org>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: qemu-devel@nongnu.org, Pavel Dovgalyuk <Pavel.Dovgaluk@ispras.ru>
Subject: Re: [Qemu-devel] [PATCH] cpus: unify qemu_*_wait_io_event
Date: Wed, 04 Apr 2018 11:34:53 +0100
Message-ID: <87lge3cl9u.fsf@linaro.org>
In-Reply-To: <20180111125444.6498-1-pbonzini@redhat.com>
Paolo Bonzini <pbonzini@redhat.com> writes:
> Except for round-robin TCG, every other accelerator is using more or
> less the same code around qemu_wait_io_event_common. The exception
> is HAX, which also has to eat the dummy APC that is queued by
> qemu_cpu_kick_thread.
>
> We can add the SleepEx call to qemu_wait_io_event under "if
> (!tcg_enabled())", since that is the condition that is used in
> qemu_cpu_kick_thread, and unify the function for KVM, HAX, HVF and
> multi-threaded TCG. Single-threaded TCG code can also be simplified
> since it is only used in the round-robin, sleep-if-all-CPUs-idle case.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
I had trouble applying this patch, but the change seems sane to me.
Acked-by: Alex Bennée <alex.bennee@linaro.org>
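
For anyone skimming the thread: piecing the hunks below together, the
unified helper after this patch should read roughly as follows. This is
reconstructed from the diff rather than taken from the resulting tree,
so treat it as a sketch:

    static void qemu_wait_io_event(CPUState *cpu)
    {
        while (cpu_thread_is_idle(cpu)) {
            qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
        }

    #ifdef _WIN32
        /* Eat the dummy APC queued by qemu_cpu_kick_thread(); per the
         * commit message, !tcg_enabled() mirrors the condition used
         * when the kick is queued, so only non-TCG threads hit this. */
        if (!tcg_enabled()) {
            SleepEx(0, TRUE);
        }
    #endif
        qemu_wait_io_event_common(cpu);
    }

i.e. the old qemu_kvm_wait_io_event()/qemu_hvf_wait_io_event() body plus
the HAX SleepEx dance, now guarded by !tcg_enabled().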
> ---
> cpus.c | 49 +++++++++++++++++--------------------------------
> 1 file changed, 17 insertions(+), 32 deletions(-)
>
> diff --git a/cpus.c b/cpus.c
> index 440b9291f5..3b6c9879ec 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -911,7 +911,8 @@ static void kick_tcg_thread(void *opaque)
>
> static void start_tcg_kick_timer(void)
> {
> - if (!mttcg_enabled && !tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
> + assert(!mttcg_enabled);
> + if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
> tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
> kick_tcg_thread, NULL);
> timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
> @@ -920,6 +921,7 @@ static void start_tcg_kick_timer(void)
>
> static void stop_tcg_kick_timer(void)
> {
> + assert(!mttcg_enabled);
> if (tcg_kick_vcpu_timer) {
> timer_del(tcg_kick_vcpu_timer);
> tcg_kick_vcpu_timer = NULL;
> @@ -1154,18 +1156,9 @@ static void qemu_wait_io_event_common(CPUState *cpu)
> process_queued_cpu_work(cpu);
> }
>
> -static bool qemu_tcg_should_sleep(CPUState *cpu)
> +static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
> {
> - if (mttcg_enabled) {
> - return cpu_thread_is_idle(cpu);
> - } else {
> - return all_cpu_threads_idle();
> - }
> -}
> -
> -static void qemu_tcg_wait_io_event(CPUState *cpu)
> -{
> - while (qemu_tcg_should_sleep(cpu)) {
> + while (all_cpu_threads_idle()) {
> stop_tcg_kick_timer();
> qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
> }
> @@ -1175,20 +1168,18 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
> qemu_wait_io_event_common(cpu);
> }
>
> -static void qemu_kvm_wait_io_event(CPUState *cpu)
> +static void qemu_wait_io_event(CPUState *cpu)
> {
> while (cpu_thread_is_idle(cpu)) {
> qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
> }
>
> - qemu_wait_io_event_common(cpu);
> -}
> -
> -static void qemu_hvf_wait_io_event(CPUState *cpu)
> -{
> - while (cpu_thread_is_idle(cpu)) {
> - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
> +#ifdef _WIN32
> + /* Eat dummy APC queued by qemu_cpu_kick_thread. */
> + if (!tcg_enabled()) {
> + SleepEx(0, TRUE);
> }
> +#endif
> qemu_wait_io_event_common(cpu);
> }
>
> @@ -1224,7 +1215,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
> cpu_handle_guest_debug(cpu);
> }
> }
> - qemu_kvm_wait_io_event(cpu);
> + qemu_wait_io_event(cpu);
> } while (!cpu->unplug || cpu_can_run(cpu));
>
> qemu_kvm_destroy_vcpu(cpu);
> @@ -1270,7 +1261,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
> exit(1);
> }
> qemu_mutex_lock_iothread();
> - qemu_wait_io_event_common(cpu);
> + qemu_wait_io_event(cpu);
> }
>
> return NULL;
> @@ -1487,7 +1478,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
> atomic_mb_set(&cpu->exit_request, 0);
> }
>
> - qemu_tcg_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
> + qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
> deal_with_unplugged_cpus();
> }
>
> @@ -1518,13 +1509,7 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
> }
> }
>
> - while (cpu_thread_is_idle(cpu)) {
> - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
> - }
> -#ifdef _WIN32
> - SleepEx(0, TRUE);
> -#endif
> - qemu_wait_io_event_common(cpu);
> + qemu_wait_io_event(cpu);
> }
> return NULL;
> }
> @@ -1561,7 +1546,7 @@ static void *qemu_hvf_cpu_thread_fn(void *arg)
> cpu_handle_guest_debug(cpu);
> }
> }
> - qemu_hvf_wait_io_event(cpu);
> + qemu_wait_io_event(cpu);
> } while (!cpu->unplug || cpu_can_run(cpu));
>
> hvf_vcpu_destroy(cpu);
> @@ -1640,7 +1625,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
> }
>
> atomic_mb_set(&cpu->exit_request, 0);
> - qemu_tcg_wait_io_event(cpu);
> + qemu_wait_io_event(cpu);
> }
>
> return NULL;
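
For completeness, the round-robin side keeps its own helper; from the
hunks it ends up along these lines (the unchanged context between the
two hunks is elided here, so again this is only a sketch):

    static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
    {
        while (all_cpu_threads_idle()) {
            stop_tcg_kick_timer();
            qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
        }

        /* ... unchanged lines between the hunks elided ... */

        qemu_wait_io_event_common(cpu);
    }

So only the sleep-if-all-vCPUs-idle case and the kick-timer handling
survive there, which matches the new assert(!mttcg_enabled) checks in
start_tcg_kick_timer() and stop_tcg_kick_timer().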
--
Alex Bennée