* [Qemu-devel] [PATCH] cpus: unify qemu_*_wait_io_event
@ 2018-01-11 12:54 Paolo Bonzini
2018-04-04 10:34 ` Alex Bennée
0 siblings, 1 reply; 4+ messages in thread
From: Paolo Bonzini @ 2018-01-11 12:54 UTC (permalink / raw)
To: qemu-devel; +Cc: Pavel Dovgalyuk, Alex Bennée
Except for round-robin TCG, every other accelerator is using more or
less the same code around qemu_wait_io_event_common. The exception
is HAX, which also has to eat the dummy APC that is queued by
qemu_cpu_kick_thread.
We can add the SleepEx call to qemu_wait_io_event under "if
(!tcg_enabled())", since that is the condition that is used in
qemu_cpu_kick_thread, and unify the function for KVM, HAX, HVF and
multi-threaded TCG. Single-threaded TCG code can also be simplified
since it is only used in the round-robin, sleep-if-all-CPUs-idle case.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
cpus.c | 49 +++++++++++++++++--------------------------------
1 file changed, 17 insertions(+), 32 deletions(-)
diff --git a/cpus.c b/cpus.c
index 440b9291f5..3b6c9879ec 100644
--- a/cpus.c
+++ b/cpus.c
@@ -911,7 +911,8 @@ static void kick_tcg_thread(void *opaque)
static void start_tcg_kick_timer(void)
{
- if (!mttcg_enabled && !tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
+ assert(!mttcg_enabled);
+ if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
kick_tcg_thread, NULL);
timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
@@ -920,6 +921,7 @@ static void start_tcg_kick_timer(void)
static void stop_tcg_kick_timer(void)
{
+ assert(!mttcg_enabled);
if (tcg_kick_vcpu_timer) {
timer_del(tcg_kick_vcpu_timer);
tcg_kick_vcpu_timer = NULL;
@@ -1154,18 +1156,9 @@ static void qemu_wait_io_event_common(CPUState *cpu)
process_queued_cpu_work(cpu);
}
-static bool qemu_tcg_should_sleep(CPUState *cpu)
+static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
{
- if (mttcg_enabled) {
- return cpu_thread_is_idle(cpu);
- } else {
- return all_cpu_threads_idle();
- }
-}
-
-static void qemu_tcg_wait_io_event(CPUState *cpu)
-{
- while (qemu_tcg_should_sleep(cpu)) {
+ while (all_cpu_threads_idle()) {
stop_tcg_kick_timer();
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
@@ -1175,20 +1168,18 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
qemu_wait_io_event_common(cpu);
}
-static void qemu_kvm_wait_io_event(CPUState *cpu)
+static void qemu_wait_io_event(CPUState *cpu)
{
while (cpu_thread_is_idle(cpu)) {
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
- qemu_wait_io_event_common(cpu);
-}
-
-static void qemu_hvf_wait_io_event(CPUState *cpu)
-{
- while (cpu_thread_is_idle(cpu)) {
- qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+#ifdef _WIN32
+ /* Eat dummy APC queued by qemu_cpu_kick_thread. */
+ if (!tcg_enabled()) {
+ SleepEx(0, TRUE);
}
+#endif
qemu_wait_io_event_common(cpu);
}
@@ -1224,7 +1215,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
cpu_handle_guest_debug(cpu);
}
}
- qemu_kvm_wait_io_event(cpu);
+ qemu_wait_io_event(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
qemu_kvm_destroy_vcpu(cpu);
@@ -1270,7 +1261,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
exit(1);
}
qemu_mutex_lock_iothread();
- qemu_wait_io_event_common(cpu);
+ qemu_wait_io_event(cpu);
}
return NULL;
@@ -1487,7 +1478,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
atomic_mb_set(&cpu->exit_request, 0);
}
- qemu_tcg_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
+ qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
deal_with_unplugged_cpus();
}
@@ -1518,13 +1509,7 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
}
}
- while (cpu_thread_is_idle(cpu)) {
- qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
- }
-#ifdef _WIN32
- SleepEx(0, TRUE);
-#endif
- qemu_wait_io_event_common(cpu);
+ qemu_wait_io_event(cpu);
}
return NULL;
}
@@ -1561,7 +1546,7 @@ static void *qemu_hvf_cpu_thread_fn(void *arg)
cpu_handle_guest_debug(cpu);
}
}
- qemu_hvf_wait_io_event(cpu);
+ qemu_wait_io_event(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
hvf_vcpu_destroy(cpu);
@@ -1640,7 +1625,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
}
atomic_mb_set(&cpu->exit_request, 0);
- qemu_tcg_wait_io_event(cpu);
+ qemu_wait_io_event(cpu);
}
return NULL;
--
2.14.3
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [Qemu-devel] [PATCH] cpus: unify qemu_*_wait_io_event
2018-01-11 12:54 [Qemu-devel] [PATCH] cpus: unify qemu_*_wait_io_event Paolo Bonzini
@ 2018-04-04 10:34 ` Alex Bennée
2018-04-04 10:51 ` Paolo Bonzini
0 siblings, 1 reply; 4+ messages in thread
From: Alex Bennée @ 2018-04-04 10:34 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: qemu-devel, Pavel Dovgalyuk
Paolo Bonzini <pbonzini@redhat.com> writes:
> Except for round-robin TCG, every other accelerator is using more or
> less the same code around qemu_wait_io_event_common. The exception
> is HAX, which also has to eat the dummy APC that is queued by
> qemu_cpu_kick_thread.
>
> We can add the SleepEx call to qemu_wait_io_event under "if
> (!tcg_enabled())", since that is the condition that is used in
> qemu_cpu_kick_thread, and unify the function for KVM, HAX, HVF and
> multi-threaded TCG. Single-threaded TCG code can also be simplified
> since it is only used in the round-robin, sleep-if-all-CPUs-idle case.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
I had trouble applying this patch but the change seems sane to me
Acked-by: Alex Bennée <alex.bennee@linaro.org>
> ---
> cpus.c | 49 +++++++++++++++++--------------------------------
> 1 file changed, 17 insertions(+), 32 deletions(-)
>
> diff --git a/cpus.c b/cpus.c
> index 440b9291f5..3b6c9879ec 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -911,7 +911,8 @@ static void kick_tcg_thread(void *opaque)
>
> static void start_tcg_kick_timer(void)
> {
> - if (!mttcg_enabled && !tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
> + assert(!mttcg_enabled);
> + if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
> tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
> kick_tcg_thread, NULL);
> timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
> @@ -920,6 +921,7 @@ static void start_tcg_kick_timer(void)
>
> static void stop_tcg_kick_timer(void)
> {
> + assert(!mttcg_enabled);
> if (tcg_kick_vcpu_timer) {
> timer_del(tcg_kick_vcpu_timer);
> tcg_kick_vcpu_timer = NULL;
> @@ -1154,18 +1156,9 @@ static void qemu_wait_io_event_common(CPUState *cpu)
> process_queued_cpu_work(cpu);
> }
>
> -static bool qemu_tcg_should_sleep(CPUState *cpu)
> +static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
> {
> - if (mttcg_enabled) {
> - return cpu_thread_is_idle(cpu);
> - } else {
> - return all_cpu_threads_idle();
> - }
> -}
> -
> -static void qemu_tcg_wait_io_event(CPUState *cpu)
> -{
> - while (qemu_tcg_should_sleep(cpu)) {
> + while (all_cpu_threads_idle()) {
> stop_tcg_kick_timer();
> qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
> }
> @@ -1175,20 +1168,18 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
> qemu_wait_io_event_common(cpu);
> }
>
> -static void qemu_kvm_wait_io_event(CPUState *cpu)
> +static void qemu_wait_io_event(CPUState *cpu)
> {
> while (cpu_thread_is_idle(cpu)) {
> qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
> }
>
> - qemu_wait_io_event_common(cpu);
> -}
> -
> -static void qemu_hvf_wait_io_event(CPUState *cpu)
> -{
> - while (cpu_thread_is_idle(cpu)) {
> - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
> +#ifdef _WIN32
> + /* Eat dummy APC queued by qemu_cpu_kick_thread. */
> + if (!tcg_enabled()) {
> + SleepEx(0, TRUE);
> }
> +#endif
> qemu_wait_io_event_common(cpu);
> }
>
> @@ -1224,7 +1215,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
> cpu_handle_guest_debug(cpu);
> }
> }
> - qemu_kvm_wait_io_event(cpu);
> + qemu_wait_io_event(cpu);
> } while (!cpu->unplug || cpu_can_run(cpu));
>
> qemu_kvm_destroy_vcpu(cpu);
> @@ -1270,7 +1261,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
> exit(1);
> }
> qemu_mutex_lock_iothread();
> - qemu_wait_io_event_common(cpu);
> + qemu_wait_io_event(cpu);
> }
>
> return NULL;
> @@ -1487,7 +1478,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
> atomic_mb_set(&cpu->exit_request, 0);
> }
>
> - qemu_tcg_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
> + qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
> deal_with_unplugged_cpus();
> }
>
> @@ -1518,13 +1509,7 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
> }
> }
>
> - while (cpu_thread_is_idle(cpu)) {
> - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
> - }
> -#ifdef _WIN32
> - SleepEx(0, TRUE);
> -#endif
> - qemu_wait_io_event_common(cpu);
> + qemu_wait_io_event(cpu);
> }
> return NULL;
> }
> @@ -1561,7 +1546,7 @@ static void *qemu_hvf_cpu_thread_fn(void *arg)
> cpu_handle_guest_debug(cpu);
> }
> }
> - qemu_hvf_wait_io_event(cpu);
> + qemu_wait_io_event(cpu);
> } while (!cpu->unplug || cpu_can_run(cpu));
>
> hvf_vcpu_destroy(cpu);
> @@ -1640,7 +1625,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
> }
>
> atomic_mb_set(&cpu->exit_request, 0);
> - qemu_tcg_wait_io_event(cpu);
> + qemu_wait_io_event(cpu);
> }
>
> return NULL;
--
Alex Bennée
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [Qemu-devel] [PATCH] cpus: unify qemu_*_wait_io_event
2018-04-04 10:34 ` Alex Bennée
@ 2018-04-04 10:51 ` Paolo Bonzini
2018-04-04 12:50 ` Alex Bennée
0 siblings, 1 reply; 4+ messages in thread
From: Paolo Bonzini @ 2018-04-04 10:51 UTC (permalink / raw)
To: Alex Bennée; +Cc: qemu-devel, Pavel Dovgalyuk
On 04/04/2018 12:34, Alex Bennée wrote:
>
> Paolo Bonzini <pbonzini@redhat.com> writes:
>
>> Except for round-robin TCG, every other accelerator is using more or
>> less the same code around qemu_wait_io_event_common. The exception
>> is HAX, which also has to eat the dummy APC that is queued by
>> qemu_cpu_kick_thread.
>>
>> We can add the SleepEx call to qemu_wait_io_event under "if
>> (!tcg_enabled())", since that is the condition that is used in
>> qemu_cpu_kick_thread, and unify the function for KVM, HAX, HVF and
>> multi-threaded TCG. Single-threaded TCG code can also be simplified
>> since it is only used in the round-robin, sleep-if-all-CPUs-idle case.
>>
>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>
> I had trouble applying this patch but the change seems sane to me
>
> Acked-by: Alex Bennée <alex.bennee@linaro.org>
Isn't this commit db08b687cdd5319286665aabd34f82665630416f?
Paolo
>> ---
>> cpus.c | 49 +++++++++++++++++--------------------------------
>> 1 file changed, 17 insertions(+), 32 deletions(-)
>>
>> diff --git a/cpus.c b/cpus.c
>> index 440b9291f5..3b6c9879ec 100644
>> --- a/cpus.c
>> +++ b/cpus.c
>> @@ -911,7 +911,8 @@ static void kick_tcg_thread(void *opaque)
>>
>> static void start_tcg_kick_timer(void)
>> {
>> - if (!mttcg_enabled && !tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
>> + assert(!mttcg_enabled);
>> + if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
>> tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
>> kick_tcg_thread, NULL);
>> timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
>> @@ -920,6 +921,7 @@ static void start_tcg_kick_timer(void)
>>
>> static void stop_tcg_kick_timer(void)
>> {
>> + assert(!mttcg_enabled);
>> if (tcg_kick_vcpu_timer) {
>> timer_del(tcg_kick_vcpu_timer);
>> tcg_kick_vcpu_timer = NULL;
>> @@ -1154,18 +1156,9 @@ static void qemu_wait_io_event_common(CPUState *cpu)
>> process_queued_cpu_work(cpu);
>> }
>>
>> -static bool qemu_tcg_should_sleep(CPUState *cpu)
>> +static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
>> {
>> - if (mttcg_enabled) {
>> - return cpu_thread_is_idle(cpu);
>> - } else {
>> - return all_cpu_threads_idle();
>> - }
>> -}
>> -
>> -static void qemu_tcg_wait_io_event(CPUState *cpu)
>> -{
>> - while (qemu_tcg_should_sleep(cpu)) {
>> + while (all_cpu_threads_idle()) {
>> stop_tcg_kick_timer();
>> qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>> }
>> @@ -1175,20 +1168,18 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
>> qemu_wait_io_event_common(cpu);
>> }
>>
>> -static void qemu_kvm_wait_io_event(CPUState *cpu)
>> +static void qemu_wait_io_event(CPUState *cpu)
>> {
>> while (cpu_thread_is_idle(cpu)) {
>> qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>> }
>>
>> - qemu_wait_io_event_common(cpu);
>> -}
>> -
>> -static void qemu_hvf_wait_io_event(CPUState *cpu)
>> -{
>> - while (cpu_thread_is_idle(cpu)) {
>> - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>> +#ifdef _WIN32
>> + /* Eat dummy APC queued by qemu_cpu_kick_thread. */
>> + if (!tcg_enabled()) {
>> + SleepEx(0, TRUE);
>> }
>> +#endif
>> qemu_wait_io_event_common(cpu);
>> }
>>
>> @@ -1224,7 +1215,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
>> cpu_handle_guest_debug(cpu);
>> }
>> }
>> - qemu_kvm_wait_io_event(cpu);
>> + qemu_wait_io_event(cpu);
>> } while (!cpu->unplug || cpu_can_run(cpu));
>>
>> qemu_kvm_destroy_vcpu(cpu);
>> @@ -1270,7 +1261,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
>> exit(1);
>> }
>> qemu_mutex_lock_iothread();
>> - qemu_wait_io_event_common(cpu);
>> + qemu_wait_io_event(cpu);
>> }
>>
>> return NULL;
>> @@ -1487,7 +1478,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
>> atomic_mb_set(&cpu->exit_request, 0);
>> }
>>
>> - qemu_tcg_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
>> + qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
>> deal_with_unplugged_cpus();
>> }
>>
>> @@ -1518,13 +1509,7 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
>> }
>> }
>>
>> - while (cpu_thread_is_idle(cpu)) {
>> - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>> - }
>> -#ifdef _WIN32
>> - SleepEx(0, TRUE);
>> -#endif
>> - qemu_wait_io_event_common(cpu);
>> + qemu_wait_io_event(cpu);
>> }
>> return NULL;
>> }
>> @@ -1561,7 +1546,7 @@ static void *qemu_hvf_cpu_thread_fn(void *arg)
>> cpu_handle_guest_debug(cpu);
>> }
>> }
>> - qemu_hvf_wait_io_event(cpu);
>> + qemu_wait_io_event(cpu);
>> } while (!cpu->unplug || cpu_can_run(cpu));
>>
>> hvf_vcpu_destroy(cpu);
>> @@ -1640,7 +1625,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>> }
>>
>> atomic_mb_set(&cpu->exit_request, 0);
>> - qemu_tcg_wait_io_event(cpu);
>> + qemu_wait_io_event(cpu);
>> }
>>
>> return NULL;
>
>
> --
> Alex Bennée
>
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [Qemu-devel] [PATCH] cpus: unify qemu_*_wait_io_event
2018-04-04 10:51 ` Paolo Bonzini
@ 2018-04-04 12:50 ` Alex Bennée
0 siblings, 0 replies; 4+ messages in thread
From: Alex Bennée @ 2018-04-04 12:50 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: qemu-devel, Pavel Dovgalyuk
Paolo Bonzini <pbonzini@redhat.com> writes:
> On 04/04/2018 12:34, Alex Bennée wrote:
>>
>> Paolo Bonzini <pbonzini@redhat.com> writes:
>>
>>> Except for round-robin TCG, every other accelerator is using more or
>>> less the same code around qemu_wait_io_event_common. The exception
>>> is HAX, which also has to eat the dummy APC that is queued by
>>> qemu_cpu_kick_thread.
>>>
>>> We can add the SleepEx call to qemu_wait_io_event under "if
>>> (!tcg_enabled())", since that is the condition that is used in
>>> qemu_cpu_kick_thread, and unify the function for KVM, HAX, HVF and
>>> multi-threaded TCG. Single-threaded TCG code can also be simplified
>>> since it is only used in the round-robin, sleep-if-all-CPUs-idle case.
>>>
>>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>>
>> I had trouble applying this patch but the change seems sane to me
>>
>> Acked-by: Alex Bennée <alex.bennee@linaro.org>
>
> Isn't this commit db08b687cdd5319286665aabd34f82665630416f?
Doh sorry, that makes even more sense. My check-if-merged script failed
me...
>
> Paolo
>
>>> ---
>>> cpus.c | 49 +++++++++++++++++--------------------------------
>>> 1 file changed, 17 insertions(+), 32 deletions(-)
>>>
>>> diff --git a/cpus.c b/cpus.c
>>> index 440b9291f5..3b6c9879ec 100644
>>> --- a/cpus.c
>>> +++ b/cpus.c
>>> @@ -911,7 +911,8 @@ static void kick_tcg_thread(void *opaque)
>>>
>>> static void start_tcg_kick_timer(void)
>>> {
>>> - if (!mttcg_enabled && !tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
>>> + assert(!mttcg_enabled);
>>> + if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
>>> tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
>>> kick_tcg_thread, NULL);
>>> timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
>>> @@ -920,6 +921,7 @@ static void start_tcg_kick_timer(void)
>>>
>>> static void stop_tcg_kick_timer(void)
>>> {
>>> + assert(!mttcg_enabled);
>>> if (tcg_kick_vcpu_timer) {
>>> timer_del(tcg_kick_vcpu_timer);
>>> tcg_kick_vcpu_timer = NULL;
>>> @@ -1154,18 +1156,9 @@ static void qemu_wait_io_event_common(CPUState *cpu)
>>> process_queued_cpu_work(cpu);
>>> }
>>>
>>> -static bool qemu_tcg_should_sleep(CPUState *cpu)
>>> +static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
>>> {
>>> - if (mttcg_enabled) {
>>> - return cpu_thread_is_idle(cpu);
>>> - } else {
>>> - return all_cpu_threads_idle();
>>> - }
>>> -}
>>> -
>>> -static void qemu_tcg_wait_io_event(CPUState *cpu)
>>> -{
>>> - while (qemu_tcg_should_sleep(cpu)) {
>>> + while (all_cpu_threads_idle()) {
>>> stop_tcg_kick_timer();
>>> qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>>> }
>>> @@ -1175,20 +1168,18 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
>>> qemu_wait_io_event_common(cpu);
>>> }
>>>
>>> -static void qemu_kvm_wait_io_event(CPUState *cpu)
>>> +static void qemu_wait_io_event(CPUState *cpu)
>>> {
>>> while (cpu_thread_is_idle(cpu)) {
>>> qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>>> }
>>>
>>> - qemu_wait_io_event_common(cpu);
>>> -}
>>> -
>>> -static void qemu_hvf_wait_io_event(CPUState *cpu)
>>> -{
>>> - while (cpu_thread_is_idle(cpu)) {
>>> - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>>> +#ifdef _WIN32
>>> + /* Eat dummy APC queued by qemu_cpu_kick_thread. */
>>> + if (!tcg_enabled()) {
>>> + SleepEx(0, TRUE);
>>> }
>>> +#endif
>>> qemu_wait_io_event_common(cpu);
>>> }
>>>
>>> @@ -1224,7 +1215,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
>>> cpu_handle_guest_debug(cpu);
>>> }
>>> }
>>> - qemu_kvm_wait_io_event(cpu);
>>> + qemu_wait_io_event(cpu);
>>> } while (!cpu->unplug || cpu_can_run(cpu));
>>>
>>> qemu_kvm_destroy_vcpu(cpu);
>>> @@ -1270,7 +1261,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
>>> exit(1);
>>> }
>>> qemu_mutex_lock_iothread();
>>> - qemu_wait_io_event_common(cpu);
>>> + qemu_wait_io_event(cpu);
>>> }
>>>
>>> return NULL;
>>> @@ -1487,7 +1478,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
>>> atomic_mb_set(&cpu->exit_request, 0);
>>> }
>>>
>>> - qemu_tcg_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
>>> + qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
>>> deal_with_unplugged_cpus();
>>> }
>>>
>>> @@ -1518,13 +1509,7 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
>>> }
>>> }
>>>
>>> - while (cpu_thread_is_idle(cpu)) {
>>> - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>>> - }
>>> -#ifdef _WIN32
>>> - SleepEx(0, TRUE);
>>> -#endif
>>> - qemu_wait_io_event_common(cpu);
>>> + qemu_wait_io_event(cpu);
>>> }
>>> return NULL;
>>> }
>>> @@ -1561,7 +1546,7 @@ static void *qemu_hvf_cpu_thread_fn(void *arg)
>>> cpu_handle_guest_debug(cpu);
>>> }
>>> }
>>> - qemu_hvf_wait_io_event(cpu);
>>> + qemu_wait_io_event(cpu);
>>> } while (!cpu->unplug || cpu_can_run(cpu));
>>>
>>> hvf_vcpu_destroy(cpu);
>>> @@ -1640,7 +1625,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>>> }
>>>
>>> atomic_mb_set(&cpu->exit_request, 0);
>>> - qemu_tcg_wait_io_event(cpu);
>>> + qemu_wait_io_event(cpu);
>>> }
>>>
>>> return NULL;
>>
>>
>> --
>> Alex Bennée
>>
--
Alex Bennée
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2018-04-04 12:50 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-01-11 12:54 [Qemu-devel] [PATCH] cpus: unify qemu_*_wait_io_event Paolo Bonzini
2018-04-04 10:34 ` Alex Bennée
2018-04-04 10:51 ` Paolo Bonzini
2018-04-04 12:50 ` Alex Bennée
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).