From: mtosatti@redhat.com
To: qemu-devel@nongnu.org, aliguori@us.ibm.com
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Subject: [Qemu-devel] [patch 06/14] qemu: refactor main_loop
Date: Wed, 22 Apr 2009 16:15:10 -0300
Message-ID: <20090422192119.991668223@localhost.localdomain>
In-Reply-To: <20090422191504.975476933@localhost.localdomain>
Break the main loop into three main pieces: guest CPU execution (qemu_cpu_exec()), timeout calculation (qemu_calculate_timeout()) and the I/O wait and event handling that remains in main_loop(), with small helpers such as tcg_has_work() and vm_can_run().
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
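
For readers skimming the diff, here is a minimal standalone sketch (not part of
the patch, toy names only, none of them QEMU symbols) of the control-flow split
this change introduces: one helper executes guest code, one computes the poll
timeout, one decides whether the loop may keep running.

    /* Toy illustration of the refactored loop shape; compiles on its own. */
    #include <stdio.h>

    static int vm_running = 1;
    static int shutdown_requested;
    static int steps;

    /* Stands in for qemu_cpu_exec(): run the guest until it exits. */
    static int toy_cpu_exec(void)
    {
        if (++steps == 5)
            shutdown_requested = 1;   /* pretend the guest asked to stop */
        return 0;
    }

    /* Stands in for qemu_calculate_timeout(): poll immediately while
       there is runnable work, otherwise sleep for a while. */
    static int toy_calculate_timeout(void)
    {
        return vm_running ? 0 : 5000;
    }

    /* Stands in for vm_can_run(): no pending shutdown/reset/powerdown. */
    static int toy_vm_can_run(void)
    {
        return !shutdown_requested;
    }

    int main(void)
    {
        while (toy_vm_can_run()) {
            toy_cpu_exec();
            printf("wait %d ms\n", toy_calculate_timeout());
        }
        return 0;
    }

In the real patch below the same three responsibilities are pulled out of the
old monolithic main_loop(), which then only drives them and handles shutdown,
reset and powerdown requests.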
Index: qemu-iothread-4/vl.c
===================================================================
--- qemu-iothread-4.orig/vl.c
+++ qemu-iothread-4/vl.c
@@ -266,7 +266,7 @@ struct drive_opt drives_opt[MAX_DRIVES];
static CPUState *cur_cpu;
static CPUState *next_cpu;
-static int event_pending = 1;
+static int timer_alarm_pending = 1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
@@ -1350,7 +1350,7 @@ static void host_alarm_handler(int host_
}
#endif
}
- event_pending = 1;
+ timer_alarm_pending = 1;
qemu_notify_event();
}
}
@@ -3811,153 +3811,175 @@ void main_loop_wait(int timeout)
}
-static int main_loop(void)
+static int qemu_cpu_exec(CPUState *env)
{
- int ret, timeout;
+ int ret;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
- CPUState *env;
- cur_cpu = first_cpu;
- next_cpu = cur_cpu->next_cpu ?: first_cpu;
- for(;;) {
- if (vm_running) {
-
- for(;;) {
- /* get next cpu */
- env = next_cpu;
#ifdef CONFIG_PROFILER
- ti = profile_getclock();
+ ti = profile_getclock();
#endif
- if (use_icount) {
- int64_t count;
- int decr;
- qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
- env->icount_decr.u16.low = 0;
- env->icount_extra = 0;
- count = qemu_next_deadline();
- count = (count + (1 << icount_time_shift) - 1)
- >> icount_time_shift;
- qemu_icount += count;
- decr = (count > 0xffff) ? 0xffff : count;
- count -= decr;
- env->icount_decr.u16.low = decr;
- env->icount_extra = count;
- }
- ret = cpu_exec(env);
+ if (use_icount) {
+ int64_t count;
+ int decr;
+ qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
+ env->icount_decr.u16.low = 0;
+ env->icount_extra = 0;
+ count = qemu_next_deadline();
+ count = (count + (1 << icount_time_shift) - 1)
+ >> icount_time_shift;
+ qemu_icount += count;
+ decr = (count > 0xffff) ? 0xffff : count;
+ count -= decr;
+ env->icount_decr.u16.low = decr;
+ env->icount_extra = count;
+ }
+ ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
- qemu_time += profile_getclock() - ti;
+ qemu_time += profile_getclock() - ti;
#endif
- if (use_icount) {
- /* Fold pending instructions back into the
- instruction counter, and clear the interrupt flag. */
- qemu_icount -= (env->icount_decr.u16.low
- + env->icount_extra);
- env->icount_decr.u32 = 0;
- env->icount_extra = 0;
- }
- next_cpu = env->next_cpu ?: first_cpu;
- if (event_pending && likely(ret != EXCP_DEBUG)) {
- ret = EXCP_INTERRUPT;
- event_pending = 0;
- break;
- }
- if (ret == EXCP_HLT) {
- /* Give the next CPU a chance to run. */
- cur_cpu = env;
- continue;
- }
- if (ret != EXCP_HALTED)
+ if (use_icount) {
+ /* Fold pending instructions back into the
+ instruction counter, and clear the interrupt flag. */
+ qemu_icount -= (env->icount_decr.u16.low
+ + env->icount_extra);
+ env->icount_decr.u32 = 0;
+ env->icount_extra = 0;
+ }
+ return ret;
+}
+
+static int cpu_has_work(CPUState *env)
+{
+ if (!env->halted)
+ return 1;
+ if (qemu_cpu_has_work(env))
+ return 1;
+ return 0;
+}
+
+static int tcg_has_work(void)
+{
+ CPUState *env;
+
+ for (env = first_cpu; env != NULL; env = env->next_cpu)
+ if (cpu_has_work(env))
+ return 1;
+ return 0;
+}
+
+static int qemu_calculate_timeout(void)
+{
+ int timeout;
+
+ if (!vm_running)
+ timeout = 5000;
+ else if (tcg_has_work())
+ timeout = 0;
+ else if (!use_icount)
+ timeout = 5000;
+ else {
+ /* XXX: use timeout computed from timers */
+ int64_t add;
+ int64_t delta;
+ /* Advance virtual time to the next event. */
+ if (use_icount == 1) {
+ /* When not using an adaptive execution frequency
+ we tend to get badly out of sync with real time,
+ so just delay for a reasonable amount of time. */
+ delta = 0;
+ } else {
+ delta = cpu_get_icount() - cpu_get_clock();
+ }
+ if (delta > 0) {
+ /* If virtual time is ahead of real time then just
+ wait for IO. */
+ timeout = (delta / 1000000) + 1;
+ } else {
+ /* Wait for either IO to occur or the next
+ timer event. */
+ add = qemu_next_deadline();
+ /* We advance the timer before checking for IO.
+ Limit the amount we advance so that early IO
+ activity won't get the guest too far ahead. */
+ if (add > 10000000)
+ add = 10000000;
+ delta += add;
+ add = (add + (1 << icount_time_shift) - 1)
+ >> icount_time_shift;
+ qemu_icount += add;
+ timeout = delta / 1000000;
+ if (timeout < 0)
+ timeout = 0;
+ }
+ }
+
+ return timeout;
+}
+
+static int vm_can_run(void)
+{
+ if (powerdown_requested)
+ return 0;
+ if (reset_requested)
+ return 0;
+ if (shutdown_requested)
+ return 0;
+ return 1;
+}
+
+static void main_loop(void)
+{
+ int ret = 0;
+#ifdef CONFIG_PROFILER
+ int64_t ti;
+#endif
+
+ for (;;) {
+ do {
+ if (next_cpu == NULL)
+ next_cpu = first_cpu;
+ for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
+ CPUState *env = cur_cpu = next_cpu;
+
+ if (!vm_running)
break;
- /* all CPUs are halted ? */
- if (env == cur_cpu)
+ if (timer_alarm_pending) {
+ timer_alarm_pending = 0;
break;
- }
- cur_cpu = env;
-
- if (shutdown_requested) {
- ret = EXCP_INTERRUPT;
- if (no_shutdown) {
- vm_stop(0);
- no_shutdown = 0;
}
- else
+ ret = qemu_cpu_exec(env);
+ if (ret == EXCP_DEBUG) {
+ gdb_set_stop_cpu(env);
break;
- }
- if (reset_requested) {
- reset_requested = 0;
- qemu_system_reset();
- ret = EXCP_INTERRUPT;
- }
- if (powerdown_requested) {
- powerdown_requested = 0;
- qemu_system_powerdown();
- ret = EXCP_INTERRUPT;
- }
- if (unlikely(ret == EXCP_DEBUG)) {
- gdb_set_stop_cpu(cur_cpu);
- vm_stop(EXCP_DEBUG);
- }
- /* If all cpus are halted then wait until the next IRQ */
- /* XXX: use timeout computed from timers */
- if (ret == EXCP_HALTED) {
- if (use_icount) {
- int64_t add;
- int64_t delta;
- /* Advance virtual time to the next event. */
- if (use_icount == 1) {
- /* When not using an adaptive execution frequency
- we tend to get badly out of sync with real time,
- so just delay for a reasonable amount of time. */
- delta = 0;
- } else {
- delta = cpu_get_icount() - cpu_get_clock();
- }
- if (delta > 0) {
- /* If virtual time is ahead of real time then just
- wait for IO. */
- timeout = (delta / 1000000) + 1;
- } else {
- /* Wait for either IO to occur or the next
- timer event. */
- add = qemu_next_deadline();
- /* We advance the timer before checking for IO.
- Limit the amount we advance so that early IO
- activity won't get the guest too far ahead. */
- if (add > 10000000)
- add = 10000000;
- delta += add;
- add = (add + (1 << icount_time_shift) - 1)
- >> icount_time_shift;
- qemu_icount += add;
- timeout = delta / 1000000;
- if (timeout < 0)
- timeout = 0;
- }
- } else {
- timeout = 5000;
}
- } else {
- timeout = 0;
}
- } else {
- if (shutdown_requested) {
- ret = EXCP_INTERRUPT;
- break;
- }
- timeout = 5000;
- }
#ifdef CONFIG_PROFILER
- ti = profile_getclock();
+ ti = profile_getclock();
#endif
- main_loop_wait(timeout);
+ main_loop_wait(qemu_calculate_timeout());
#ifdef CONFIG_PROFILER
- dev_time += profile_getclock() - ti;
+ dev_time += profile_getclock() - ti;
#endif
+ } while (ret != EXCP_DEBUG && vm_can_run());
+
+ if (ret == EXCP_DEBUG)
+ vm_stop(EXCP_DEBUG);
+
+ if (qemu_shutdown_requested()) {
+ if (no_shutdown) {
+ vm_stop(0);
+ no_shutdown = 0;
+ } else
+ break;
+ }
+ if (qemu_reset_requested())
+ qemu_system_reset();
+ if (qemu_powerdown_requested())
+ qemu_system_powerdown();
}
- cpu_disable_ticks();
- return ret;
}
static void version(void)
--