* [Qemu-devel] [7241] qemu: refactor main_loop (Marcelo Tosatti)
@ 2009-04-24 18:03 Anthony Liguori
2009-04-28 22:34 ` [Qemu-devel] " Jan Kiszka
0 siblings, 1 reply; 8+ messages in thread
From: Anthony Liguori @ 2009-04-24 18:03 UTC (permalink / raw)
To: qemu-devel
Revision: 7241
http://svn.sv.gnu.org/viewvc/?view=rev&root=qemu&revision=7241
Author: aliguori
Date: 2009-04-24 18:03:33 +0000 (Fri, 24 Apr 2009)
Log Message:
-----------
qemu: refactor main_loop (Marcelo Tosatti)
Break main loop into 3 main functions.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Modified Paths:
--------------
trunk/vl.c
Modified: trunk/vl.c
===================================================================
--- trunk/vl.c 2009-04-24 18:03:29 UTC (rev 7240)
+++ trunk/vl.c 2009-04-24 18:03:33 UTC (rev 7241)
@@ -273,7 +273,7 @@
static CPUState *cur_cpu;
static CPUState *next_cpu;
-static int event_pending = 1;
+static int timer_alarm_pending = 1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
@@ -1360,7 +1360,7 @@
}
#endif
}
- event_pending = 1;
+ timer_alarm_pending = 1;
qemu_notify_event();
}
}
@@ -3879,153 +3879,175 @@
}
-static int main_loop(void)
+static int qemu_cpu_exec(CPUState *env)
{
- int ret, timeout;
+ int ret;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
+
+#ifdef CONFIG_PROFILER
+ ti = profile_getclock();
+#endif
+ if (use_icount) {
+ int64_t count;
+ int decr;
+ qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
+ env->icount_decr.u16.low = 0;
+ env->icount_extra = 0;
+ count = qemu_next_deadline();
+ count = (count + (1 << icount_time_shift) - 1)
+ >> icount_time_shift;
+ qemu_icount += count;
+ decr = (count > 0xffff) ? 0xffff : count;
+ count -= decr;
+ env->icount_decr.u16.low = decr;
+ env->icount_extra = count;
+ }
+ ret = cpu_exec(env);
+#ifdef CONFIG_PROFILER
+ qemu_time += profile_getclock() - ti;
+#endif
+ if (use_icount) {
+ /* Fold pending instructions back into the
+ instruction counter, and clear the interrupt flag. */
+ qemu_icount -= (env->icount_decr.u16.low
+ + env->icount_extra);
+ env->icount_decr.u32 = 0;
+ env->icount_extra = 0;
+ }
+ return ret;
+}
+
+static int cpu_has_work(CPUState *env)
+{
+ if (!env->halted)
+ return 1;
+ if (qemu_cpu_has_work(env))
+ return 1;
+ return 0;
+}
+
+static int tcg_has_work(void)
+{
CPUState *env;
- cur_cpu = first_cpu;
- next_cpu = cur_cpu->next_cpu ?: first_cpu;
- for(;;) {
- if (vm_running) {
+ for (env = first_cpu; env != NULL; env = env->next_cpu)
+ if (cpu_has_work(env))
+ return 1;
+ return 0;
+}
- for(;;) {
- /* get next cpu */
- env = next_cpu;
+static int qemu_calculate_timeout(void)
+{
+ int timeout;
+
+ if (!vm_running)
+ timeout = 5000;
+ else if (tcg_has_work())
+ timeout = 0;
+ else if (!use_icount)
+ timeout = 5000;
+ else {
+ /* XXX: use timeout computed from timers */
+ int64_t add;
+ int64_t delta;
+ /* Advance virtual time to the next event. */
+ if (use_icount == 1) {
+ /* When not using an adaptive execution frequency
+ we tend to get badly out of sync with real time,
+ so just delay for a reasonable amount of time. */
+ delta = 0;
+ } else {
+ delta = cpu_get_icount() - cpu_get_clock();
+ }
+ if (delta > 0) {
+ /* If virtual time is ahead of real time then just
+ wait for IO. */
+ timeout = (delta / 1000000) + 1;
+ } else {
+ /* Wait for either IO to occur or the next
+ timer event. */
+ add = qemu_next_deadline();
+ /* We advance the timer before checking for IO.
+ Limit the amount we advance so that early IO
+ activity won't get the guest too far ahead. */
+ if (add > 10000000)
+ add = 10000000;
+ delta += add;
+ add = (add + (1 << icount_time_shift) - 1)
+ >> icount_time_shift;
+ qemu_icount += add;
+ timeout = delta / 1000000;
+ if (timeout < 0)
+ timeout = 0;
+ }
+ }
+
+ return timeout;
+}
+
+static int vm_can_run(void)
+{
+ if (powerdown_requested)
+ return 0;
+ if (reset_requested)
+ return 0;
+ if (shutdown_requested)
+ return 0;
+ return 1;
+}
+
+static void main_loop(void)
+{
+ int ret = 0;
#ifdef CONFIG_PROFILER
- ti = profile_getclock();
+ int64_t ti;
#endif
- if (use_icount) {
- int64_t count;
- int decr;
- qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
- env->icount_decr.u16.low = 0;
- env->icount_extra = 0;
- count = qemu_next_deadline();
- count = (count + (1 << icount_time_shift) - 1)
- >> icount_time_shift;
- qemu_icount += count;
- decr = (count > 0xffff) ? 0xffff : count;
- count -= decr;
- env->icount_decr.u16.low = decr;
- env->icount_extra = count;
- }
- ret = cpu_exec(env);
-#ifdef CONFIG_PROFILER
- qemu_time += profile_getclock() - ti;
-#endif
- if (use_icount) {
- /* Fold pending instructions back into the
- instruction counter, and clear the interrupt flag. */
- qemu_icount -= (env->icount_decr.u16.low
- + env->icount_extra);
- env->icount_decr.u32 = 0;
- env->icount_extra = 0;
- }
- next_cpu = env->next_cpu ?: first_cpu;
- if (event_pending && likely(ret != EXCP_DEBUG)) {
- ret = EXCP_INTERRUPT;
- event_pending = 0;
+
+ for (;;) {
+ do {
+ if (next_cpu == NULL)
+ next_cpu = first_cpu;
+ for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
+ CPUState *env = cur_cpu = next_cpu;
+
+ if (!vm_running)
break;
- }
- if (ret == EXCP_HLT) {
- /* Give the next CPU a chance to run. */
- cur_cpu = env;
- continue;
- }
- if (ret != EXCP_HALTED)
+ if (timer_alarm_pending) {
+ timer_alarm_pending = 0;
break;
- /* all CPUs are halted ? */
- if (env == cur_cpu)
- break;
- }
- cur_cpu = env;
-
- if (shutdown_requested) {
- ret = EXCP_INTERRUPT;
- if (no_shutdown) {
- vm_stop(0);
- no_shutdown = 0;
}
- else
+ ret = qemu_cpu_exec(env);
+ if (ret == EXCP_DEBUG) {
+ gdb_set_stop_cpu(env);
break;
- }
- if (reset_requested) {
- reset_requested = 0;
- qemu_system_reset();
- ret = EXCP_INTERRUPT;
- }
- if (powerdown_requested) {
- powerdown_requested = 0;
- qemu_system_powerdown();
- ret = EXCP_INTERRUPT;
- }
- if (unlikely(ret == EXCP_DEBUG)) {
- gdb_set_stop_cpu(cur_cpu);
- vm_stop(EXCP_DEBUG);
- }
- /* If all cpus are halted then wait until the next IRQ */
- /* XXX: use timeout computed from timers */
- if (ret == EXCP_HALTED) {
- if (use_icount) {
- int64_t add;
- int64_t delta;
- /* Advance virtual time to the next event. */
- if (use_icount == 1) {
- /* When not using an adaptive execution frequency
- we tend to get badly out of sync with real time,
- so just delay for a reasonable amount of time. */
- delta = 0;
- } else {
- delta = cpu_get_icount() - cpu_get_clock();
- }
- if (delta > 0) {
- /* If virtual time is ahead of real time then just
- wait for IO. */
- timeout = (delta / 1000000) + 1;
- } else {
- /* Wait for either IO to occur or the next
- timer event. */
- add = qemu_next_deadline();
- /* We advance the timer before checking for IO.
- Limit the amount we advance so that early IO
- activity won't get the guest too far ahead. */
- if (add > 10000000)
- add = 10000000;
- delta += add;
- add = (add + (1 << icount_time_shift) - 1)
- >> icount_time_shift;
- qemu_icount += add;
- timeout = delta / 1000000;
- if (timeout < 0)
- timeout = 0;
- }
- } else {
- timeout = 5000;
}
- } else {
- timeout = 0;
}
- } else {
- if (shutdown_requested) {
- ret = EXCP_INTERRUPT;
- break;
- }
- timeout = 5000;
- }
#ifdef CONFIG_PROFILER
- ti = profile_getclock();
+ ti = profile_getclock();
#endif
- main_loop_wait(timeout);
+ main_loop_wait(qemu_calculate_timeout());
#ifdef CONFIG_PROFILER
- dev_time += profile_getclock() - ti;
+ dev_time += profile_getclock() - ti;
#endif
+ } while (ret != EXCP_DEBUG && vm_can_run());
+
+ if (ret == EXCP_DEBUG)
+ vm_stop(EXCP_DEBUG);
+
+ if (qemu_shutdown_requested()) {
+ if (no_shutdown) {
+ vm_stop(0);
+ no_shutdown = 0;
+ } else
+ break;
+ }
+ if (qemu_reset_requested())
+ qemu_system_reset();
+ if (qemu_powerdown_requested())
+ qemu_system_powerdown();
}
- cpu_disable_ticks();
- return ret;
}
static void version(void)
^ permalink raw reply [flat|nested] 8+ messages in thread
* [Qemu-devel] Re: [7241] qemu: refactor main_loop (Marcelo Tosatti)
2009-04-24 18:03 [Qemu-devel] [7241] qemu: refactor main_loop (Marcelo Tosatti) Anthony Liguori
@ 2009-04-28 22:34 ` Jan Kiszka
2009-04-28 22:41 ` Marcelo Tosatti
0 siblings, 1 reply; 8+ messages in thread
From: Jan Kiszka @ 2009-04-28 22:34 UTC (permalink / raw)
To: Anthony Liguori, Marcelo Tosatti; +Cc: qemu-devel
[-- Attachment #1: Type: text/plain, Size: 3109 bytes --]
Anthony Liguori wrote:
> Revision: 7241
> http://svn.sv.gnu.org/viewvc/?view=rev&root=qemu&revision=7241
> Author: aliguori
> Date: 2009-04-24 18:03:33 +0000 (Fri, 24 Apr 2009)
> Log Message:
> -----------
> qemu: refactor main_loop (Marcelo Tosatti)
>
> Break main loop into 3 main functions.
I suspect this patch comes with a race between SIGALRM and
qemu_calculate_timeout() so that I see occasional freezes of exactly 5
seconds if the IO thread is disabled. I do not yet understand what
happens precisely or if this patch only widens an already existing race
window in the old code, but I'm on it.
Besides that...
>
> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
> Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
>
> Modified Paths:
> --------------
> trunk/vl.c
>
> Modified: trunk/vl.c
> ===================================================================
> --- trunk/vl.c 2009-04-24 18:03:29 UTC (rev 7240)
> +++ trunk/vl.c 2009-04-24 18:03:33 UTC (rev 7241)
> @@ -273,7 +273,7 @@
>
> static CPUState *cur_cpu;
> static CPUState *next_cpu;
> -static int event_pending = 1;
> +static int timer_alarm_pending = 1;
> /* Conversion factor from emulated instructions to virtual clock ticks. */
> static int icount_time_shift;
> /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
> @@ -1360,7 +1360,7 @@
> }
> #endif
> }
> - event_pending = 1;
> + timer_alarm_pending = 1;
> qemu_notify_event();
> }
> }
> @@ -3879,153 +3879,175 @@
>
> }
>
> -static int main_loop(void)
> +static int qemu_cpu_exec(CPUState *env)
> {
> - int ret, timeout;
> + int ret;
> #ifdef CONFIG_PROFILER
> int64_t ti;
> #endif
> +
> +#ifdef CONFIG_PROFILER
> + ti = profile_getclock();
> +#endif
> + if (use_icount) {
> + int64_t count;
> + int decr;
> + qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
> + env->icount_decr.u16.low = 0;
> + env->icount_extra = 0;
> + count = qemu_next_deadline();
> + count = (count + (1 << icount_time_shift) - 1)
> + >> icount_time_shift;
> + qemu_icount += count;
> + decr = (count > 0xffff) ? 0xffff : count;
> + count -= decr;
> + env->icount_decr.u16.low = decr;
> + env->icount_extra = count;
> + }
> + ret = cpu_exec(env);
> +#ifdef CONFIG_PROFILER
> + qemu_time += profile_getclock() - ti;
> +#endif
> + if (use_icount) {
> + /* Fold pending instructions back into the
> + instruction counter, and clear the interrupt flag. */
> + qemu_icount -= (env->icount_decr.u16.low
> + + env->icount_extra);
> + env->icount_decr.u32 = 0;
> + env->icount_extra = 0;
> + }
> + return ret;
> +}
> +
> +static int cpu_has_work(CPUState *env)
...this naming is suboptimal. There is already cpu_has_work() in
target-*/exec.h which is at least confusing. Please rename.
Jan
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 257 bytes --]
^ permalink raw reply [flat|nested] 8+ messages in thread
* [Qemu-devel] Re: [7241] qemu: refactor main_loop (Marcelo Tosatti)
2009-04-28 22:34 ` [Qemu-devel] " Jan Kiszka
@ 2009-04-28 22:41 ` Marcelo Tosatti
2009-04-28 22:55 ` Jan Kiszka
0 siblings, 1 reply; 8+ messages in thread
From: Marcelo Tosatti @ 2009-04-28 22:41 UTC (permalink / raw)
To: Jan Kiszka; +Cc: qemu-devel
On Wed, Apr 29, 2009 at 12:34:55AM +0200, Jan Kiszka wrote:
> Anthony Liguori wrote:
> > Revision: 7241
> > http://svn.sv.gnu.org/viewvc/?view=rev&root=qemu&revision=7241
> > Author: aliguori
> > Date: 2009-04-24 18:03:33 +0000 (Fri, 24 Apr 2009)
> > Log Message:
> > -----------
> > qemu: refactor main_loop (Marcelo Tosatti)
> >
> > Break main loop into 3 main functions.
>
> I suspect this patch comes with a race between SIGALRM and
> qemu_calculate_timeout() so that I see occasional freezes of exactly 5
> seconds if the IO thread is disabled.
host_alarm_handler writes to the notification fd (via
qemu_event_increment), which should cause the select() to exit
immediately, even if a pending timer was not taken into account by
qemu_calculate_timeout().
But 5 seconds is a good clue :)
> I do not yet understand what
> happens precisely or if this patch only widens an already existing race
> window in the old code, but I'm on it.
>
> Besides that...
>
> >
> > Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
> > Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
> >
> > Modified Paths:
> > --------------
> > trunk/vl.c
> >
> > Modified: trunk/vl.c
> > ===================================================================
> > --- trunk/vl.c 2009-04-24 18:03:29 UTC (rev 7240)
> > +++ trunk/vl.c 2009-04-24 18:03:33 UTC (rev 7241)
> > @@ -273,7 +273,7 @@
> >
> > static CPUState *cur_cpu;
> > static CPUState *next_cpu;
> > -static int event_pending = 1;
> > +static int timer_alarm_pending = 1;
> > /* Conversion factor from emulated instructions to virtual clock ticks. */
> > static int icount_time_shift;
> > /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
> > @@ -1360,7 +1360,7 @@
> > }
> > #endif
> > }
> > - event_pending = 1;
> > + timer_alarm_pending = 1;
> > qemu_notify_event();
> > }
> > }
> > @@ -3879,153 +3879,175 @@
> >
> > }
> >
> > -static int main_loop(void)
> > +static int qemu_cpu_exec(CPUState *env)
> > {
> > - int ret, timeout;
> > + int ret;
> > #ifdef CONFIG_PROFILER
> > int64_t ti;
> > #endif
> > +
> > +#ifdef CONFIG_PROFILER
> > + ti = profile_getclock();
> > +#endif
> > + if (use_icount) {
> > + int64_t count;
> > + int decr;
> > + qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
> > + env->icount_decr.u16.low = 0;
> > + env->icount_extra = 0;
> > + count = qemu_next_deadline();
> > + count = (count + (1 << icount_time_shift) - 1)
> > + >> icount_time_shift;
> > + qemu_icount += count;
> > + decr = (count > 0xffff) ? 0xffff : count;
> > + count -= decr;
> > + env->icount_decr.u16.low = decr;
> > + env->icount_extra = count;
> > + }
> > + ret = cpu_exec(env);
> > +#ifdef CONFIG_PROFILER
> > + qemu_time += profile_getclock() - ti;
> > +#endif
> > + if (use_icount) {
> > + /* Fold pending instructions back into the
> > + instruction counter, and clear the interrupt flag. */
> > + qemu_icount -= (env->icount_decr.u16.low
> > + + env->icount_extra);
> > + env->icount_decr.u32 = 0;
> > + env->icount_extra = 0;
> > + }
> > + return ret;
> > +}
> > +
> > +static int cpu_has_work(CPUState *env)
>
> ...this naming is suboptimal. There is already cpu_has_work() in
> target-*/exec.h which is at least confusing. Please rename.
Well, it's static. What name do you prefer (can't find a better name
really)? do_cpu_exec?
^ permalink raw reply [flat|nested] 8+ messages in thread
* [Qemu-devel] Re: [7241] qemu: refactor main_loop (Marcelo Tosatti)
2009-04-28 22:41 ` Marcelo Tosatti
@ 2009-04-28 22:55 ` Jan Kiszka
2009-04-28 23:02 ` Marcelo Tosatti
0 siblings, 1 reply; 8+ messages in thread
From: Jan Kiszka @ 2009-04-28 22:55 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: qemu-devel
[-- Attachment #1: Type: text/plain, Size: 4080 bytes --]
Marcelo Tosatti wrote:
> On Wed, Apr 29, 2009 at 12:34:55AM +0200, Jan Kiszka wrote:
>> Anthony Liguori wrote:
>>> Revision: 7241
>>> http://svn.sv.gnu.org/viewvc/?view=rev&root=qemu&revision=7241
>>> Author: aliguori
>>> Date: 2009-04-24 18:03:33 +0000 (Fri, 24 Apr 2009)
>>> Log Message:
>>> -----------
>>> qemu: refactor main_loop (Marcelo Tosatti)
>>>
>>> Break main loop into 3 main functions.
>> I suspect this patch comes with a race between SIGALRM and
>> qemu_calculate_timeout() so that I see occasional freezes of exactly 5
>> seconds if the IO thread is disabled.
>
> host_alarm_handler writes to the notification fd (via
> qemu_event_increment), which should cause the select() to exit
> immediately, even if a pending timer was not taken into account by
> qemu_calculate_timeout().
Yeah, now I remember. And I always wondered why my strace logs reported
that writing to that file descriptor failed. I should have looked closer
into this immediately... (patch will follow 8) )
>
> But 5 seconds is a good clue :)
>
>> I do not yet understand what
>> happens precisely or if this patch only widens an already existing race
>> window in the old code, but I'm on it.
>>
>> Besides that...
>>
>>> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
>>> Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
>>>
>>> Modified Paths:
>>> --------------
>>> trunk/vl.c
>>>
>>> Modified: trunk/vl.c
>>> ===================================================================
>>> --- trunk/vl.c 2009-04-24 18:03:29 UTC (rev 7240)
>>> +++ trunk/vl.c 2009-04-24 18:03:33 UTC (rev 7241)
>>> @@ -273,7 +273,7 @@
>>>
>>> static CPUState *cur_cpu;
>>> static CPUState *next_cpu;
>>> -static int event_pending = 1;
>>> +static int timer_alarm_pending = 1;
>>> /* Conversion factor from emulated instructions to virtual clock ticks. */
>>> static int icount_time_shift;
>>> /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
>>> @@ -1360,7 +1360,7 @@
>>> }
>>> #endif
>>> }
>>> - event_pending = 1;
>>> + timer_alarm_pending = 1;
>>> qemu_notify_event();
>>> }
>>> }
>>> @@ -3879,153 +3879,175 @@
>>>
>>> }
>>>
>>> -static int main_loop(void)
>>> +static int qemu_cpu_exec(CPUState *env)
>>> {
>>> - int ret, timeout;
>>> + int ret;
>>> #ifdef CONFIG_PROFILER
>>> int64_t ti;
>>> #endif
>>> +
>>> +#ifdef CONFIG_PROFILER
>>> + ti = profile_getclock();
>>> +#endif
>>> + if (use_icount) {
>>> + int64_t count;
>>> + int decr;
>>> + qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
>>> + env->icount_decr.u16.low = 0;
>>> + env->icount_extra = 0;
>>> + count = qemu_next_deadline();
>>> + count = (count + (1 << icount_time_shift) - 1)
>>> + >> icount_time_shift;
>>> + qemu_icount += count;
>>> + decr = (count > 0xffff) ? 0xffff : count;
>>> + count -= decr;
>>> + env->icount_decr.u16.low = decr;
>>> + env->icount_extra = count;
>>> + }
>>> + ret = cpu_exec(env);
>>> +#ifdef CONFIG_PROFILER
>>> + qemu_time += profile_getclock() - ti;
>>> +#endif
>>> + if (use_icount) {
>>> + /* Fold pending instructions back into the
>>> + instruction counter, and clear the interrupt flag. */
>>> + qemu_icount -= (env->icount_decr.u16.low
>>> + + env->icount_extra);
>>> + env->icount_decr.u32 = 0;
>>> + env->icount_extra = 0;
>>> + }
>>> + return ret;
>>> +}
>>> +
>>> +static int cpu_has_work(CPUState *env)
>> ...this naming is suboptimal. There is already cpu_has_work() in
>> target-*/exec.h which is at least confusing. Please rename.
>
> Well its static. What name do you prefer (can't find a better name
> really). do_cpu_exec?
>
Yes, it works, but it doesn't help reading the code. Also: tcg_has_work
- isn't kvm also running through this?
Jan
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 257 bytes --]
^ permalink raw reply [flat|nested] 8+ messages in thread
* [Qemu-devel] Re: [7241] qemu: refactor main_loop (Marcelo Tosatti)
2009-04-28 22:55 ` Jan Kiszka
@ 2009-04-28 23:02 ` Marcelo Tosatti
2009-04-29 7:18 ` Gerd Hoffmann
0 siblings, 1 reply; 8+ messages in thread
From: Marcelo Tosatti @ 2009-04-28 23:02 UTC (permalink / raw)
To: Jan Kiszka; +Cc: qemu-devel
On Wed, Apr 29, 2009 at 12:55:07AM +0200, Jan Kiszka wrote:
> > host_alarm_handler writes to the notification fd (via
> > qemu_event_increment), which should cause the select() to exit
> > immediately, even if a pending timer was not taken into account by
> > qemu_calculate_timeout().
>
> Yeah, now I remember. And I always wondered why my strace logs reported
> that writing to that file descriptor failed. I should have looked closer
> into this immediately... (patch will follow 8) )
I started seeing VCPU_RUN fail with EBADFD, though it is not very easy
to reproduce (io thread disabled):
#0 0x0000003695e32215 in raise () from /lib64/libc.so.6
Missing separate debuginfos, use: debuginfo-install SDL.x86_64
cyrus-sasl.x86_64 glibc.x86_64 gnutls.x86_64 libX11.x86_64 libXau.x86_64
libXdmcp.x86_64 libgcrypt.x86_64 libgpg-error.x86_64 libtasn1.x86_64
libxcb.x86_64 ncurses.x86_64 zlib.x86_64
(gdb) bt
#0 0x0000003695e32215 in raise () from /lib64/libc.so.6
#1 0x0000003695e33d83 in abort () from /lib64/libc.so.6
#2 0x000000000041bf98 in kvm_cpu_exec (env=0xc04850)
at /home/marcelo/git/qemu/kvm-all.c:508
#3 0x00000000004c403b in cpu_x86_exec (env1=<value optimized out>)
at /home/marcelo/git/qemu/cpu-exec.c:349
#4 0x000000000040cc44 in main (argc=16, argv=0x7fff5dcec948,
envp=<value optimized out>) at /home/marcelo/git/qemu/vl.c:4300
If I just ignore that EBADFD instead of aborting, the next VCPU_RUNs
function properly. ?!?!?
When it happens, it does _once_ during guest boot, right at udev
initialization.
> > Well its static. What name do you prefer (can't find a better name
> > really). do_cpu_exec?
> >
>
> Yes, it works, but it doesn't help reading the code. Also: tcg_has_work
> - isn't kvm also running through this?
Yes it is; I'm very bad at names. Feel free to rename them.
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [Qemu-devel] Re: [7241] qemu: refactor main_loop (Marcelo Tosatti)
2009-04-28 23:02 ` Marcelo Tosatti
@ 2009-04-29 7:18 ` Gerd Hoffmann
2009-04-29 7:54 ` Jan Kiszka
0 siblings, 1 reply; 8+ messages in thread
From: Gerd Hoffmann @ 2009-04-29 7:18 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: Jan Kiszka, qemu-devel
On 04/29/09 01:02, Marcelo Tosatti wrote:
> I started seeing VCPU_RUN fail with EBADFD, though it is not very easy
> to reproduce (io thread disabled):
/me too.
Doing a (kickstart-automated) fedora guest install triggers it for me.
Happens at some random point during package install.
> #2 0x000000000041bf98 in kvm_cpu_exec (env=0xc04850)
> at /home/marcelo/git/qemu/kvm-all.c:508
Yep, exactly that place.
cheers,
Gerd
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [Qemu-devel] Re: [7241] qemu: refactor main_loop (Marcelo Tosatti)
2009-04-29 7:18 ` Gerd Hoffmann
@ 2009-04-29 7:54 ` Jan Kiszka
2009-04-29 9:47 ` Gerd Hoffmann
0 siblings, 1 reply; 8+ messages in thread
From: Jan Kiszka @ 2009-04-29 7:54 UTC (permalink / raw)
To: Gerd Hoffmann; +Cc: Marcelo Tosatti, qemu-devel
[-- Attachment #1: Type: text/plain, Size: 567 bytes --]
Gerd Hoffmann wrote:
> On 04/29/09 01:02, Marcelo Tosatti wrote:
>> I started seeing VCPU_RUN fail with EBADFD, though it is not very easy
>> to reproduce (io thread disabled):
>
> /me too.
> Doing a (kickstart-automated) fedora guest install triggers it for me.
> Happens at some random point during package install.
>
>> #2 0x000000000041bf98 in kvm_cpu_exec (env=0xc04850)
>> at /home/marcelo/git/qemu/kvm-all.c:508
>
> Yep, exactly that place.
>
Does http://permalink.gmane.org/gmane.comp.emulators.qemu/42473 fix it
for you?
Jan
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 257 bytes --]
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [Qemu-devel] Re: [7241] qemu: refactor main_loop (Marcelo Tosatti)
2009-04-29 7:54 ` Jan Kiszka
@ 2009-04-29 9:47 ` Gerd Hoffmann
0 siblings, 0 replies; 8+ messages in thread
From: Gerd Hoffmann @ 2009-04-29 9:47 UTC (permalink / raw)
To: Jan Kiszka; +Cc: Marcelo Tosatti, qemu-devel
On 04/29/09 09:54, Jan Kiszka wrote:
> Gerd Hoffmann wrote:
>> On 04/29/09 01:02, Marcelo Tosatti wrote:
>>> I started seeing VCPU_RUN fail with EBADFD, though it is not very easy
>>> to reproduce (io thread disabled):
>> /me too.
>> Doing a (kickstart-automated) fedora guest install triggers it for me.
>> Happens at some random point during package install.
>>
>>> #2 0x000000000041bf98 in kvm_cpu_exec (env=0xc04850)
>>> at /home/marcelo/git/qemu/kvm-all.c:508
>> Yep, exactly that place.
>>
>
> Does http://permalink.gmane.org/gmane.comp.emulators.qemu/42473 fix it
> for you?
Yes, looks good.
cheers,
Gerd
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2009-04-29 9:48 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2009-04-24 18:03 [Qemu-devel] [7241] qemu: refactor main_loop (Marcelo Tosatti) Anthony Liguori
2009-04-28 22:34 ` [Qemu-devel] " Jan Kiszka
2009-04-28 22:41 ` Marcelo Tosatti
2009-04-28 22:55 ` Jan Kiszka
2009-04-28 23:02 ` Marcelo Tosatti
2009-04-29 7:18 ` Gerd Hoffmann
2009-04-29 7:54 ` Jan Kiszka
2009-04-29 9:47 ` Gerd Hoffmann
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).