* [PATCH] qemu-kvm: fix guest resetting
@ 2008-05-08 8:29 Jan Kiszka
2008-05-08 23:35 ` Marcelo Tosatti
0 siblings, 1 reply; 6+ messages in thread
From: Jan Kiszka @ 2008-05-08 8:29 UTC (permalink / raw)
To: kvm-devel
Resetting guests used to be racy, deadlock-prone, or simply broken (for
SMP). This patch fixes the issues - at least for me on x86 (tested on
Intel SMP host, UP and SMP guest, in-kernel and user space irqchip,
guest- and monitor-issued resets). Note that ia64 and powerpc may need
to look into the SMP thing as well (=>kvm_arch_cpu_reset).
At this chance, the patch also cleans up some unneeded reset fragments.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
qemu/qemu-kvm-ia64.c | 4 ++++
qemu/qemu-kvm-powerpc.c | 4 ++++
qemu/qemu-kvm-x86.c | 16 ++++++++++++++++
qemu/qemu-kvm.c | 38 ++++++++++++++++++--------------------
qemu/qemu-kvm.h | 1 +
qemu/vl.c | 11 ++++++-----
6 files changed, 49 insertions(+), 25 deletions(-)
Index: b/qemu/qemu-kvm.c
===================================================================
--- a/qemu/qemu-kvm.c
+++ b/qemu/qemu-kvm.c
@@ -28,8 +28,6 @@ kvm_context_t kvm_context;
extern int smp_cpus;
-static int qemu_kvm_reset_requested;
-
pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t qemu_aio_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
@@ -56,7 +54,6 @@ struct vcpu_info {
int signalled;
int stop;
int stopped;
- int reload_regs;
int created;
} vcpu_info[256];
@@ -257,17 +254,20 @@ static int all_threads_paused(void)
static void pause_all_threads(void)
{
+ CPUState *cpu_single = cpu_single_env;
int i;
- for (i = 0; i < smp_cpus; ++i) {
- vcpu_info[i].stop = 1;
- pthread_kill(vcpu_info[i].thread, SIG_IPI);
- }
+ for (i = 0; i < smp_cpus; ++i)
+ if (!cpu_single || cpu_single->cpu_index != i) {
+ vcpu_info[i].stop = 1;
+ pthread_kill(vcpu_info[i].thread, SIG_IPI);
+ }
+
while (!all_threads_paused()) {
pthread_mutex_unlock(&qemu_mutex);
- kvm_eat_signal(&io_signal_table, NULL, 1000);
+ kvm_eat_signal(&io_signal_table, cpu_single, 1000);
pthread_mutex_lock(&qemu_mutex);
- cpu_single_env = NULL;
+ cpu_single_env = cpu_single;
}
}
@@ -317,11 +317,18 @@ void qemu_kvm_system_reset_request(void)
{
int i;
+ pause_all_threads();
+
+ qemu_system_reset();
+
+ for (i = 0; i < smp_cpus; ++i)
+ kvm_arch_cpu_reset(vcpu_info[i].env);
+
for (i = 0; i < smp_cpus; ++i) {
- vcpu_info[i].reload_regs = 1;
+ vcpu_info[i].stop = 0;
+ vcpu_info[i].stopped = 0;
pthread_kill(vcpu_info[i].thread, SIG_IPI);
}
- qemu_system_reset();
}
static int kvm_main_loop_cpu(CPUState *env)
@@ -354,11 +361,6 @@ static int kvm_main_loop_cpu(CPUState *e
kvm_cpu_exec(env);
env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
kvm_main_loop_wait(env, 0);
- if (info->reload_regs) {
- info->reload_regs = 0;
- if (env->cpu_index == 0) /* ap needs to be placed in INIT */
- kvm_arch_load_regs(env);
- }
}
pthread_mutex_unlock(&qemu_mutex);
return 0;
@@ -467,10 +469,6 @@ int kvm_main_loop(void)
break;
else if (qemu_powerdown_requested())
qemu_system_powerdown();
- else if (qemu_reset_requested()) {
- pthread_kill(vcpu_info[0].thread, SIG_IPI);
- qemu_kvm_reset_requested = 1;
- }
pthread_mutex_unlock(&qemu_mutex);
}
Index: b/qemu/qemu-kvm-ia64.c
===================================================================
--- a/qemu/qemu-kvm-ia64.c
+++ b/qemu/qemu-kvm-ia64.c
@@ -61,3 +61,7 @@ int kvm_arch_try_push_interrupts(void *o
void kvm_arch_update_regs_for_sipi(CPUState *env)
{
}
+
+void kvm_arch_cpu_reset(CPUState *env)
+{
+}
Index: b/qemu/qemu-kvm-powerpc.c
===================================================================
--- a/qemu/qemu-kvm-powerpc.c
+++ b/qemu/qemu-kvm-powerpc.c
@@ -213,3 +213,7 @@ int handle_powerpc_dcr_write(int vcpu, u
return 0; /* XXX ignore failed DCR ops */
}
+
+void kvm_arch_cpu_reset(CPUState *env)
+{
+}
Index: b/qemu/qemu-kvm-x86.c
===================================================================
--- a/qemu/qemu-kvm-x86.c
+++ b/qemu/qemu-kvm-x86.c
@@ -689,3 +689,19 @@ int handle_tpr_access(void *opaque, int
kvm_tpr_access_report(cpu_single_env, rip, is_write);
return 0;
}
+
+void kvm_arch_cpu_reset(CPUState *env)
+{
+ struct kvm_mp_state mp_state = { .mp_state = KVM_MP_STATE_UNINITIALIZED };
+
+ kvm_arch_load_regs(env);
+ if (env->cpu_index != 0) {
+ if (kvm_irqchip_in_kernel(kvm_context))
+ kvm_set_mpstate(kvm_context, env->cpu_index, &mp_state);
+ else {
+ env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ env->hflags |= HF_HALTED_MASK;
+ env->exception_index = EXCP_HLT;
+ }
+ }
+}
Index: b/qemu/qemu-kvm.h
===================================================================
--- a/qemu/qemu-kvm.h
+++ b/qemu/qemu-kvm.h
@@ -55,6 +55,7 @@ void kvm_arch_post_kvm_run(void *opaque,
int kvm_arch_has_work(CPUState *env);
int kvm_arch_try_push_interrupts(void *opaque);
void kvm_arch_update_regs_for_sipi(CPUState *env);
+void kvm_arch_cpu_reset(CPUState *env);
CPUState *qemu_kvm_cpu_env(int index);
Index: b/qemu/vl.c
===================================================================
--- a/qemu/vl.c
+++ b/qemu/vl.c
@@ -7235,6 +7235,12 @@ void qemu_system_reset(void)
void qemu_system_reset_request(void)
{
+#ifdef USE_KVM
+ if (kvm_allowed && !no_reboot) {
+ qemu_kvm_system_reset_request();
+ return;
+ }
+#endif
if (no_reboot) {
shutdown_requested = 1;
} else {
@@ -7242,11 +7248,6 @@ void qemu_system_reset_request(void)
}
if (cpu_single_env)
cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
-#ifdef USE_KVM
- if (kvm_allowed)
- if (!no_reboot)
- qemu_kvm_system_reset_request();
-#endif
}
void qemu_system_shutdown_request(void)
-------------------------------------------------------------------------
This SF.net email is sponsored by the 2008 JavaOne(SM) Conference
Don't miss this year's exciting event. There's still time to save $100.
Use priority code J8TL2D2.
http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] qemu-kvm: fix guest resetting
2008-05-08 8:29 [PATCH] qemu-kvm: fix guest resetting Jan Kiszka
@ 2008-05-08 23:35 ` Marcelo Tosatti
2008-05-09 8:13 ` Jan Kiszka
0 siblings, 1 reply; 6+ messages in thread
From: Marcelo Tosatti @ 2008-05-08 23:35 UTC (permalink / raw)
To: Jan Kiszka; +Cc: kvm-devel
Hi Jan,
On Thu, May 08, 2008 at 10:29:32AM +0200, Jan Kiszka wrote:
> Resetting guests used to be racy, deadlock-prone, or simply broken (for
> SMP). This patch fixes the issues - at least for me on x86 (tested on
> Intel SMP host, UP and SMP guest, in-kernel und user space irqchip,
> guest- and monitor-issued resets). Note that ia64 and powerpc may need
> to look into the SMP thing as well (=>kvm_arch_cpu_reset).
>
> At this chance, the patch also cleans up some unneeded reset fragments.
>
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> ---
> @@ -317,11 +317,18 @@ void qemu_kvm_system_reset_request(void)
> {
> int i;
>
> + pause_all_threads();
> +
> + qemu_system_reset();
> +
> + for (i = 0; i < smp_cpus; ++i)
> + kvm_arch_cpu_reset(vcpu_info[i].env);
> +
> for (i = 0; i < smp_cpus; ++i) {
> - vcpu_info[i].reload_regs = 1;
> + vcpu_info[i].stop = 0;
> + vcpu_info[i].stopped = 0;
> pthread_kill(vcpu_info[i].thread, SIG_IPI);
> }
> - qemu_system_reset();
Why don't you signal the IO thread to pause all vcpu's and place their
registers and "run state" in the proper condition if the reset request
comes from the guest? It should simplify things a lot (and avoid any
changes to vl.c).
After signalling the vcpu should stop instead of returning to guest
mode.
-------------------------------------------------------------------------
This SF.net email is sponsored by the 2008 JavaOne(SM) Conference
Don't miss this year's exciting event. There's still time to save $100.
Use priority code J8TL2D2.
http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] qemu-kvm: fix guest resetting
2008-05-08 23:35 ` Marcelo Tosatti
@ 2008-05-09 8:13 ` Jan Kiszka
2008-05-09 16:59 ` Marcelo Tosatti
0 siblings, 1 reply; 6+ messages in thread
From: Jan Kiszka @ 2008-05-09 8:13 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm-devel
Marcelo Tosatti wrote:
> Hi Jan,
>
> On Thu, May 08, 2008 at 10:29:32AM +0200, Jan Kiszka wrote:
>> Resetting guests used to be racy, deadlock-prone, or simply broken (for
>> SMP). This patch fixes the issues - at least for me on x86 (tested on
>> Intel SMP host, UP and SMP guest, in-kernel und user space irqchip,
>> guest- and monitor-issued resets). Note that ia64 and powerpc may need
>> to look into the SMP thing as well (=>kvm_arch_cpu_reset).
>>
>> At this chance, the patch also cleans up some unneeded reset fragments.
>>
>> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
>> ---
>> @@ -317,11 +317,18 @@ void qemu_kvm_system_reset_request(void)
>> {
>> int i;
>>
>> + pause_all_threads();
>> +
>> + qemu_system_reset();
>> +
>> + for (i = 0; i < smp_cpus; ++i)
>> + kvm_arch_cpu_reset(vcpu_info[i].env);
>> +
>> for (i = 0; i < smp_cpus; ++i) {
>> - vcpu_info[i].reload_regs = 1;
>> + vcpu_info[i].stop = 0;
>> + vcpu_info[i].stopped = 0;
>> pthread_kill(vcpu_info[i].thread, SIG_IPI);
>> }
>> - qemu_system_reset();
>
> Why don't you signal the IO thread to pause all vcpu's and place their
> registers and "run state" in the proper condition if the reset request
> comes from the guest? It should simplify things a lot (and avoid any
> changes to vl.c).
>
> After signalling the vcpu should stop instead of returning to guest
> mode.
Hmm, need to think a bit more about it as I don't see the benefit yet
(code suggestions are welcome in the meantime :)!).
The changes to vl.c are actually cleanups, as pause_all_threads() is now
context-agnostic and we no longer need to go through the qemu way of
raising reset. This new property of pause_all_threads() is something we
should desire for simplicity and robustness reasons anyway (e.g. to
simplify guest debugging later on). And the way
qemu_kvm_system_reset_request() is implemented would not change, we need
this serialization to avoid races between IO and VCPU threads.
Jan
--
Siemens AG, Corporate Technology, CT SE 2
Corporate Competence Center Embedded Linux
-------------------------------------------------------------------------
This SF.net email is sponsored by the 2008 JavaOne(SM) Conference
Don't miss this year's exciting event. There's still time to save $100.
Use priority code J8TL2D2.
http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] qemu-kvm: fix guest resetting
2008-05-09 8:13 ` Jan Kiszka
@ 2008-05-09 16:59 ` Marcelo Tosatti
2008-05-09 18:46 ` Anthony Liguori
0 siblings, 1 reply; 6+ messages in thread
From: Marcelo Tosatti @ 2008-05-09 16:59 UTC (permalink / raw)
To: Jan Kiszka; +Cc: kvm-devel
On Fri, May 09, 2008 at 10:13:33AM +0200, Jan Kiszka wrote:
> Hmm, need to think a bit more about it as I don't see the benefit yet
> (code suggestions are welcome in the meantime :)!).
Doing the reset from two different contexes is confusing and more
complicated.
> The changes to vl.c are actually cleanups, as pause_all_threads() is now
> context-agnostic and we no longer need to go through the qemu way of
> raising reset. This new property of pause_all_threads() is something we
> should desire for simplicity and robustness reasons anyway (e.g. to
> simplify guest debugging later on).
You can signal the IO thread (main_loop_break in the current
kvm-userspace.git) and then stop.
> And the way qemu_kvm_system_reset_request() is implemented would not
> change, we need this serialization to avoid races between IO and VCPU
> threads.
How about this ?
diff --git a/qemu/qemu-kvm-ia64.c b/qemu/qemu-kvm-ia64.c
index 4d0ddd7..d227d22 100644
--- a/qemu/qemu-kvm-ia64.c
+++ b/qemu/qemu-kvm-ia64.c
@@ -61,3 +61,7 @@ int kvm_arch_try_push_interrupts(void *opaque)
void kvm_arch_update_regs_for_sipi(CPUState *env)
{
}
+
+void kvm_arch_cpu_reset(CPUState *env)
+{
+}
diff --git a/qemu/qemu-kvm-powerpc.c b/qemu/qemu-kvm-powerpc.c
index 14ed945..024b18c 100644
--- a/qemu/qemu-kvm-powerpc.c
+++ b/qemu/qemu-kvm-powerpc.c
@@ -213,3 +213,7 @@ int handle_powerpc_dcr_write(int vcpu, uint32_t dcrn, uint32_t data)
return 0; /* XXX ignore failed DCR ops */
}
+
+void kvm_arch_cpu_reset(CPUState *env)
+{
+}
diff --git a/qemu/qemu-kvm-x86.c b/qemu/qemu-kvm-x86.c
index 9a771ff..28eb5c2 100644
--- a/qemu/qemu-kvm-x86.c
+++ b/qemu/qemu-kvm-x86.c
@@ -689,3 +689,19 @@ int handle_tpr_access(void *opaque, int vcpu,
kvm_tpr_access_report(cpu_single_env, rip, is_write);
return 0;
}
+
+void kvm_arch_cpu_reset(CPUState *env)
+{
+ struct kvm_mp_state mp_state = { .mp_state = KVM_MP_STATE_UNINITIALIZED };
+
+ kvm_arch_load_regs(env);
+ if (env->cpu_index != 0) {
+ if (kvm_irqchip_in_kernel(kvm_context))
+ kvm_set_mpstate(kvm_context, env->cpu_index, &mp_state);
+ else {
+ env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ env->hflags |= HF_HALTED_MASK;
+ env->exception_index = EXCP_HLT;
+ }
+ }
+}
diff --git a/qemu/qemu-kvm.c b/qemu/qemu-kvm.c
index 3cc6d8e..1e1f065 100644
--- a/qemu/qemu-kvm.c
+++ b/qemu/qemu-kvm.c
@@ -31,8 +31,6 @@ kvm_context_t kvm_context;
extern int smp_cpus;
-static int qemu_kvm_reset_requested;
-
pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t qemu_aio_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
@@ -52,7 +50,6 @@ struct vcpu_info {
int signalled;
int stop;
int stopped;
- int reload_regs;
int created;
} vcpu_info[256];
@@ -242,21 +239,29 @@ static void pause_all_threads(void)
{
int i;
+ if (cpu_single_env) {
+ fprintf(stderr, "qemu-kvm: pause_all_threads from vcpu context\n");
+ exit(0);
+ }
+
for (i = 0; i < smp_cpus; ++i) {
vcpu_info[i].stop = 1;
pthread_kill(vcpu_info[i].thread, SIG_IPI);
}
- while (!all_threads_paused()) {
- CPUState *env = cpu_single_env;
+ while (!all_threads_paused())
pthread_cond_wait(&qemu_pause_cond, &qemu_mutex);
- cpu_single_env = env;
- }
+ cpu_single_env = NULL;
}
static void resume_all_threads(void)
{
int i;
+ if (cpu_single_env) {
+ fprintf(stderr, "qemu-kvm: resume_all_threads from vcpu context\n");
+ exit(0);
+ }
+
for (i = 0; i < smp_cpus; ++i) {
vcpu_info[i].stop = 0;
vcpu_info[i].stopped = 0;
@@ -301,15 +306,18 @@ static void setup_kernel_sigmask(CPUState *env)
kvm_set_signal_mask(kvm_context, env->cpu_index, &set);
}
-void qemu_kvm_system_reset_request(void)
+void qemu_kvm_system_reset(void)
{
int i;
- for (i = 0; i < smp_cpus; ++i) {
- vcpu_info[i].reload_regs = 1;
- pthread_kill(vcpu_info[i].thread, SIG_IPI);
- }
+ pause_all_threads();
+
qemu_system_reset();
+
+ for (i = 0; i < smp_cpus; ++i)
+ kvm_arch_cpu_reset(vcpu_info[i].env);
+
+ resume_all_threads();
}
static int kvm_main_loop_cpu(CPUState *env)
@@ -342,11 +350,6 @@ static int kvm_main_loop_cpu(CPUState *env)
kvm_cpu_exec(env);
env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
kvm_main_loop_wait(env, 0);
- if (info->reload_regs) {
- info->reload_regs = 0;
- if (env->cpu_index == 0) /* ap needs to be placed in INIT */
- kvm_arch_load_regs(env);
- }
}
pthread_mutex_unlock(&qemu_mutex);
return 0;
@@ -529,10 +532,8 @@ int kvm_main_loop(void)
break;
else if (qemu_powerdown_requested())
qemu_system_powerdown();
- else if (qemu_reset_requested()) {
- pthread_kill(vcpu_info[0].thread, SIG_IPI);
- qemu_kvm_reset_requested = 1;
- }
+ else if (qemu_reset_requested())
+ qemu_kvm_system_reset();
}
pause_all_threads();
@@ -641,6 +642,9 @@ static int kvm_halt(void *opaque, int vcpu)
static int kvm_shutdown(void *opaque, int vcpu)
{
+ /* stop the current vcpu from going back to guest mode */
+ vcpu_info[cpu_single_env->cpu_index].stopped = 1;
+
qemu_system_reset_request();
return 1;
}
diff --git a/qemu/qemu-kvm.h b/qemu/qemu-kvm.h
index 21606e9..7412e20 100644
--- a/qemu/qemu-kvm.h
+++ b/qemu/qemu-kvm.h
@@ -59,6 +59,7 @@ void kvm_arch_post_kvm_run(void *opaque, int vcpu);
int kvm_arch_has_work(CPUState *env);
int kvm_arch_try_push_interrupts(void *opaque);
void kvm_arch_update_regs_for_sipi(CPUState *env);
+void kvm_arch_cpu_reset(CPUState *env);
CPUState *qemu_kvm_cpu_env(int index);
diff --git a/qemu/vl.c b/qemu/vl.c
index 45c97af..a38c336 100644
--- a/qemu/vl.c
+++ b/qemu/vl.c
@@ -7302,11 +7302,7 @@ void qemu_system_reset_request(void)
}
if (cpu_single_env)
cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
-#ifdef USE_KVM
- if (kvm_allowed)
- if (!no_reboot)
- qemu_kvm_system_reset_request();
-#endif
+ main_loop_break();
}
void qemu_system_shutdown_request(void)
-------------------------------------------------------------------------
This SF.net email is sponsored by the 2008 JavaOne(SM) Conference
Don't miss this year's exciting event. There's still time to save $100.
Use priority code J8TL2D2.
http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH] qemu-kvm: fix guest resetting
2008-05-09 16:59 ` Marcelo Tosatti
@ 2008-05-09 18:46 ` Anthony Liguori
2008-05-10 8:26 ` Jan Kiszka
0 siblings, 1 reply; 6+ messages in thread
From: Anthony Liguori @ 2008-05-09 18:46 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm-devel, Jan Kiszka
Marcelo Tosatti wrote:
> diff --git a/qemu/qemu-kvm-ia64.c b/qemu/qemu-kvm-ia64.c
> index 4d0ddd7..d227d22 100644
> --- a/qemu/qemu-kvm-ia64.c
> +++ b/qemu/qemu-kvm-ia64.c
> @@ -61,3 +61,7 @@ int kvm_arch_try_push_interrupts(void *opaque)
> void kvm_arch_update_regs_for_sipi(CPUState *env)
> {
> }
> +
> +void kvm_arch_cpu_reset(CPUState *env)
> +{
> +}
> diff --git a/qemu/qemu-kvm-powerpc.c b/qemu/qemu-kvm-powerpc.c
> index 14ed945..024b18c 100644
> --- a/qemu/qemu-kvm-powerpc.c
> +++ b/qemu/qemu-kvm-powerpc.c
> @@ -213,3 +213,7 @@ int handle_powerpc_dcr_write(int vcpu, uint32_t dcrn, uint32_t data)
>
> return 0; /* XXX ignore failed DCR ops */
> }
> +
> +void kvm_arch_cpu_reset(CPUState *env)
> +{
> +}
> diff --git a/qemu/qemu-kvm-x86.c b/qemu/qemu-kvm-x86.c
> index 9a771ff..28eb5c2 100644
> --- a/qemu/qemu-kvm-x86.c
> +++ b/qemu/qemu-kvm-x86.c
> @@ -689,3 +689,19 @@ int handle_tpr_access(void *opaque, int vcpu,
> kvm_tpr_access_report(cpu_single_env, rip, is_write);
> return 0;
> }
> +
> +void kvm_arch_cpu_reset(CPUState *env)
> +{
> + struct kvm_mp_state mp_state = { .mp_state = KVM_MP_STATE_UNINITIALIZED };
> +
> + kvm_arch_load_regs(env);
> + if (env->cpu_index != 0) {
> + if (kvm_irqchip_in_kernel(kvm_context))
> + kvm_set_mpstate(kvm_context, env->cpu_index, &mp_state);
> + else {
> + env->interrupt_request &= ~CPU_INTERRUPT_HARD;
> + env->hflags |= HF_HALTED_MASK;
> + env->exception_index = EXCP_HLT;
> + }
> + }
> +}
> diff --git a/qemu/qemu-kvm.c b/qemu/qemu-kvm.c
> index 3cc6d8e..1e1f065 100644
> --- a/qemu/qemu-kvm.c
> +++ b/qemu/qemu-kvm.c
> @@ -31,8 +31,6 @@ kvm_context_t kvm_context;
>
> extern int smp_cpus;
>
> -static int qemu_kvm_reset_requested;
> -
> pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
> pthread_cond_t qemu_aio_cond = PTHREAD_COND_INITIALIZER;
> pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
> @@ -52,7 +50,6 @@ struct vcpu_info {
> int signalled;
> int stop;
> int stopped;
> - int reload_regs;
> int created;
> } vcpu_info[256];
>
> @@ -242,21 +239,29 @@ static void pause_all_threads(void)
> {
> int i;
>
> + if (cpu_single_env) {
> + fprintf(stderr, "qemu-kvm: pause_all_threads from vcpu context\n");
> + exit(0);
> + }
> +
> for (i = 0; i < smp_cpus; ++i) {
> vcpu_info[i].stop = 1;
> pthread_kill(vcpu_info[i].thread, SIG_IPI);
> }
> - while (!all_threads_paused()) {
> - CPUState *env = cpu_single_env;
> + while (!all_threads_paused())
> pthread_cond_wait(&qemu_pause_cond, &qemu_mutex);
> - cpu_single_env = env;
> - }
> + cpu_single_env = NULL;
> }
>
Personally, I prefer it the old way. All of the open-coded
cpu_single_env's are tough to understand and I believe error-prone. I
think a strategy of explicitly preserving cpu_single_env whenever we
drop qemu_mutex is more robust. Explicitly setting cpu_single_env =
NULL happens to work because this is only called from the io thread.
It's less clear to a casual reader why it's necessary.
In fact, I'd be much more inclined to see a wrapper around
pthread_cond_wait() so that we never explicitly had to set cpu_single_env.
Regards,
Anthony Liguori
> }
>
> void qemu_system_shutdown_request(void)
>
> -------------------------------------------------------------------------
> This SF.net email is sponsored by the 2008 JavaOne(SM) Conference
> Don't miss this year's exciting event. There's still time to save $100.
> Use priority code J8TL2D2.
> http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
> _______________________________________________
> kvm-devel mailing list
> kvm-devel@lists.sourceforge.net
> https://lists.sourceforge.net/lists/listinfo/kvm-devel
>
-------------------------------------------------------------------------
This SF.net email is sponsored by the 2008 JavaOne(SM) Conference
Don't miss this year's exciting event. There's still time to save $100.
Use priority code J8TL2D2.
http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] qemu-kvm: fix guest resetting
2008-05-09 18:46 ` Anthony Liguori
@ 2008-05-10 8:26 ` Jan Kiszka
0 siblings, 0 replies; 6+ messages in thread
From: Jan Kiszka @ 2008-05-10 8:26 UTC (permalink / raw)
To: Anthony Liguori; +Cc: kvm-devel, Marcelo Tosatti
Anthony Liguori wrote:
> Marcelo Tosatti wrote:
>> diff --git a/qemu/qemu-kvm-ia64.c b/qemu/qemu-kvm-ia64.c
>> index 4d0ddd7..d227d22 100644
>> --- a/qemu/qemu-kvm-ia64.c
>> +++ b/qemu/qemu-kvm-ia64.c
>> @@ -61,3 +61,7 @@ int kvm_arch_try_push_interrupts(void *opaque)
>> void kvm_arch_update_regs_for_sipi(CPUState *env)
>> {
>> }
>> +
>> +void kvm_arch_cpu_reset(CPUState *env)
>> +{
>> +}
>> diff --git a/qemu/qemu-kvm-powerpc.c b/qemu/qemu-kvm-powerpc.c
>> index 14ed945..024b18c 100644
>> --- a/qemu/qemu-kvm-powerpc.c
>> +++ b/qemu/qemu-kvm-powerpc.c
>> @@ -213,3 +213,7 @@ int handle_powerpc_dcr_write(int vcpu, uint32_t dcrn, uint32_t data)
>>
>> return 0; /* XXX ignore failed DCR ops */
>> }
>> +
>> +void kvm_arch_cpu_reset(CPUState *env)
>> +{
>> +}
>> diff --git a/qemu/qemu-kvm-x86.c b/qemu/qemu-kvm-x86.c
>> index 9a771ff..28eb5c2 100644
>> --- a/qemu/qemu-kvm-x86.c
>> +++ b/qemu/qemu-kvm-x86.c
>> @@ -689,3 +689,19 @@ int handle_tpr_access(void *opaque, int vcpu,
>> kvm_tpr_access_report(cpu_single_env, rip, is_write);
>> return 0;
>> }
>> +
>> +void kvm_arch_cpu_reset(CPUState *env)
>> +{
>> + struct kvm_mp_state mp_state = { .mp_state = KVM_MP_STATE_UNINITIALIZED };
>> +
>> + kvm_arch_load_regs(env);
>> + if (env->cpu_index != 0) {
>> + if (kvm_irqchip_in_kernel(kvm_context))
>> + kvm_set_mpstate(kvm_context, env->cpu_index, &mp_state);
>> + else {
>> + env->interrupt_request &= ~CPU_INTERRUPT_HARD;
>> + env->hflags |= HF_HALTED_MASK;
>> + env->exception_index = EXCP_HLT;
>> + }
>> + }
>> +}
>> diff --git a/qemu/qemu-kvm.c b/qemu/qemu-kvm.c
>> index 3cc6d8e..1e1f065 100644
>> --- a/qemu/qemu-kvm.c
>> +++ b/qemu/qemu-kvm.c
>> @@ -31,8 +31,6 @@ kvm_context_t kvm_context;
>>
>> extern int smp_cpus;
>>
>> -static int qemu_kvm_reset_requested;
>> -
>> pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
>> pthread_cond_t qemu_aio_cond = PTHREAD_COND_INITIALIZER;
>> pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
>> @@ -52,7 +50,6 @@ struct vcpu_info {
>> int signalled;
>> int stop;
>> int stopped;
>> - int reload_regs;
>> int created;
>> } vcpu_info[256];
>>
>> @@ -242,21 +239,29 @@ static void pause_all_threads(void)
>> {
>> int i;
>>
>> + if (cpu_single_env) {
>> + fprintf(stderr, "qemu-kvm: pause_all_threads from vcpu context\n");
>> + exit(0);
>> + }
>> +
>> for (i = 0; i < smp_cpus; ++i) {
>> vcpu_info[i].stop = 1;
>> pthread_kill(vcpu_info[i].thread, SIG_IPI);
>> }
>> - while (!all_threads_paused()) {
>> - CPUState *env = cpu_single_env;
>> + while (!all_threads_paused())
>> pthread_cond_wait(&qemu_pause_cond, &qemu_mutex);
>> - cpu_single_env = env;
>> - }
>> + cpu_single_env = NULL;
>> }
>>
>
> Personally, I prefer it the old way. All of the open-coded
> cpu_single_env's are tough to understand and I believe error-prone. I
> think a strategy of explicitly preserving cpu_single_env whenever we
> drop qemu_mutex is more robust. Explicitly setting cpu_single_env =
> NULL happens to work because this is only called from the io thread.
> It's less clear to a casual reader why it's necessary.
>
> In fact, I'd be much more inclined to see a wrapper around
> pthread_cond_wait() so that we never explicitly had to set cpu_single_env.
You mean something like this?
(Hunks may have some offsets, the patch is from the middle of my queue.)
Marcelo, I'm fine with your approach. I even switched my
vm_stop-on-breakpoint patch to this pattern, and it seems to work
nicely. Will roll out an updated series later (but probably not today -
weather is too fine here 8)).
---
qemu/qemu-kvm.c | 32 +++++++++++++++++++-------------
1 file changed, 19 insertions(+), 13 deletions(-)
Index: b/qemu/qemu-kvm.c
===================================================================
--- a/qemu/qemu-kvm.c
+++ b/qemu/qemu-kvm.c
@@ -12,6 +12,7 @@ int kvm_allowed = 1;
int kvm_irqchip = 1;
int kvm_pit = 1;
+#include <assert.h>
#include <string.h>
#include "hw/hw.h"
#include "sysemu.h"
@@ -65,6 +66,14 @@ static inline unsigned long kvm_get_thre
return syscall(SYS_gettid);
}
+static void qemu_cond_wait(pthread_cond_t *cond)
+{
+ CPUState *env = cpu_single_env;
+
+ pthread_cond_wait(cond, &qemu_mutex);
+ cpu_single_env = env;
+}
+
CPUState *qemu_kvm_cpu_env(int index)
{
return vcpu_info[index].env;
@@ -247,21 +256,22 @@ static void pause_all_threads(void)
{
int i;
+ assert(!cpu_single_env);
+
for (i = 0; i < smp_cpus; ++i) {
vcpu_info[i].stop = 1;
pthread_kill(vcpu_info[i].thread, SIG_IPI);
}
- while (!all_threads_paused()) {
- CPUState *env = cpu_single_env;
- pthread_cond_wait(&qemu_pause_cond, &qemu_mutex);
- cpu_single_env = env;
- }
+ while (!all_threads_paused())
+ qemu_cond_wait(&qemu_pause_cond);
}
static void resume_all_threads(void)
{
int i;
+ assert(!cpu_single_env);
+
for (i = 0; i < smp_cpus; ++i) {
vcpu_info[i].stop = 0;
vcpu_info[i].stopped = 0;
@@ -380,7 +390,7 @@ static void *ap_main_loop(void *_env)
/* and wait for machine initialization */
while (!qemu_system_ready)
- pthread_cond_wait(&qemu_system_cond, &qemu_mutex);
+ qemu_cond_wait(&qemu_system_cond);
pthread_mutex_unlock(&qemu_mutex);
kvm_main_loop_cpu(env);
@@ -392,7 +402,7 @@ void kvm_init_new_ap(int cpu, CPUState *
pthread_create(&vcpu_info[cpu].thread, NULL, ap_main_loop, env);
while (vcpu_info[cpu].created == 0)
- pthread_cond_wait(&qemu_vcpu_cond, &qemu_mutex);
+ qemu_cond_wait(&qemu_vcpu_cond);
}
int kvm_init_ap(void)
@@ -901,8 +911,6 @@ void qemu_kvm_aio_wait_start(void)
void qemu_kvm_aio_wait(void)
{
- CPUState *cpu_single = cpu_single_env;
-
if (!cpu_single_env) {
if (io_thread_sigfd != -1) {
fd_set rfds;
@@ -919,10 +927,8 @@ void qemu_kvm_aio_wait(void)
sigfd_handler((void *)(unsigned long)io_thread_sigfd);
}
qemu_aio_poll();
- } else {
- pthread_cond_wait(&qemu_aio_cond, &qemu_mutex);
- cpu_single_env = cpu_single;
- }
+ } else
+ qemu_cond_wait(&qemu_aio_cond);
}
void qemu_kvm_aio_wait_end(void)
-------------------------------------------------------------------------
This SF.net email is sponsored by the 2008 JavaOne(SM) Conference
Don't miss this year's exciting event. There's still time to save $100.
Use priority code J8TL2D2.
http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2008-05-10 8:26 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2008-05-08 8:29 [PATCH] qemu-kvm: fix guest resetting Jan Kiszka
2008-05-08 23:35 ` Marcelo Tosatti
2008-05-09 8:13 ` Jan Kiszka
2008-05-09 16:59 ` Marcelo Tosatti
2008-05-09 18:46 ` Anthony Liguori
2008-05-10 8:26 ` Jan Kiszka
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox