From: mtosatti@redhat.com
To: qemu-devel@nongnu.org, aliguori@us.ibm.com
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Subject: [Qemu-devel] [patch 14/14] qemu: introduce iothread
Date: Wed, 22 Apr 2009 16:15:18 -0300
Message-ID: <20090422192120.950192749@localhost.localdomain>
In-Reply-To: <20090422191504.975476933@localhost.localdomain>
Fill in the hooks introduced earlier in this series and introduce the
iothread itself.
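With CONFIG_IOTHREAD enabled, the main loop and all device emulation run
in a dedicated I/O thread, while guest code runs in vcpu threads: one
thread per vcpu under KVM, and a single shared thread for all vcpus
under TCG. Everything is serialized by qemu_global_mutex: vcpu threads
hold it while executing guest code and drop it while idle, funnelling
through qemu_fair_mutex so the I/O thread cannot be starved of the lock.
Pausing is a per-vcpu stop/stopped handshake, with SIGUSR1 as the kick
that knocks a vcpu out of guest execution. Without CONFIG_IOTHREAD the
old single-threaded behaviour is preserved.

The pause handshake reduces to the standalone sketch below, written
against plain pthreads rather than the qemu-thread wrappers. The names
are illustrative, a single vcpu stands in for the cpu-list walk, and the
real code uses timed waits where this sketch blocks:

/* pause.c: hypothetical sketch of the handshake, not QEMU code */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t halt_cond = PTHREAD_COND_INITIALIZER;  /* per-vcpu in the patch */
static pthread_cond_t pause_cond = PTHREAD_COND_INITIALIZER; /* qemu_pause_cond */
static int stop;    /* pause requested; mirrors env->stop */
static int stopped; /* pause acknowledged; mirrors env->stopped */

static void *vcpu_thread(void *arg)
{
    pthread_mutex_lock(&global_lock);
    for (;;) {
        if (stop) {                   /* a pause was requested */
            stop = 0;
            stopped = 1;              /* acknowledge it */
            pthread_cond_signal(&pause_cond);
        }
        if (stopped) {                /* stay paused until resumed */
            pthread_cond_wait(&halt_cond, &global_lock);
            continue;
        }
        /* ... guest code would run here, with global_lock held ... */
        pthread_cond_wait(&halt_cond, &global_lock); /* idle until kicked */
    }
    return NULL;
}

static void pause_vcpu(void)          /* caller holds global_lock */
{
    stop = 1;
    pthread_cond_broadcast(&halt_cond);           /* the "kick" */
    while (!stopped)
        pthread_cond_wait(&pause_cond, &global_lock);
}

static void resume_vcpu(void)         /* caller holds global_lock */
{
    stop = stopped = 0;
    pthread_cond_broadcast(&halt_cond);
}

int main(void)
{
    pthread_t tid;

    pthread_create(&tid, NULL, vcpu_thread, NULL);
    pthread_mutex_lock(&global_lock);
    pause_vcpu();
    printf("vcpu paused\n");
    resume_vcpu();
    pthread_mutex_unlock(&global_lock);
    return 0;   /* process exit takes the vcpu thread down with it */
}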
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
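
The starvation-avoidance dance in qemu_wait_io_event() and the KVM
branch of qemu_mutex_lock_iothread() likewise reduces to a two-mutex
pattern. A hypothetical self-contained rendering, with illustrative
names and default pthreads mutexes:

/* fairlock.c: hypothetical sketch, not QEMU code */
#include <pthread.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t fair_lock = PTHREAD_MUTEX_INITIALIZER;

/* vcpu side: drop the big lock briefly, funnelling through fair_lock
 * so an I/O thread already queued on it is guaranteed to get
 * global_lock before we take it back. */
static void vcpu_yield_iothread(void)
{
    pthread_mutex_unlock(&global_lock);

    pthread_mutex_lock(&fair_lock);   /* blocks while the I/O thread */
    pthread_mutex_unlock(&fair_lock); /* is mid-acquisition below    */

    pthread_mutex_lock(&global_lock);
}

/* I/O-thread side: hold fair_lock across the acquisition so the
 * yielding vcpu cannot snatch global_lock back first. */
static void iothread_lock(void)
{
    pthread_mutex_lock(&fair_lock);
    pthread_mutex_lock(&global_lock);
    pthread_mutex_unlock(&fair_lock);
}

int main(void)
{
    /* single-threaded demo: no contention, the calls just exercise
     * the lock ordering */
    pthread_mutex_lock(&global_lock);
    vcpu_yield_iothread();
    pthread_mutex_unlock(&global_lock);

    iothread_lock();
    pthread_mutex_unlock(&global_lock);
    return 0;
}

Holding fair_lock across the acquisition is what guarantees the waiting
I/O thread, not the yielding vcpu, gets qemu_global_mutex next.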
Index: qemu-iothread-4/vl.c
===================================================================
--- qemu-iothread-4.orig/vl.c
+++ qemu-iothread-4/vl.c
@@ -1337,19 +1337,19 @@ static void host_alarm_handler(int host_
qemu_get_clock(vm_clock))) ||
qemu_timer_expired(active_timers[QEMU_TIMER_REALTIME],
qemu_get_clock(rt_clock))) {
- CPUState *env = next_cpu;
-
alarm_timer->flags |= ALARM_FLAG_EXPIRED;
- if (env) {
+#ifndef CONFIG_IOTHREAD
+ if (next_cpu) {
/* stop the currently executing cpu because a timer occurred */
- cpu_exit(env);
+ cpu_exit(next_cpu);
#ifdef USE_KQEMU
- if (env->kqemu_enabled) {
- kqemu_cpu_interrupt(env);
+ if (next_cpu->kqemu_enabled) {
+ kqemu_cpu_interrupt(next_cpu);
}
#endif
}
+#endif
timer_alarm_pending = 1;
qemu_notify_event();
}
@@ -3473,6 +3473,9 @@ static void vm_state_notify(int running,
}
}
+static void resume_all_vcpus(void);
+static void pause_all_vcpus(void);
+
void vm_start(void)
{
if (!vm_running) {
@@ -3480,6 +3483,7 @@ void vm_start(void)
vm_running = 1;
vm_state_notify(1, 0);
qemu_rearm_alarm_timer(alarm_timer);
+ resume_all_vcpus();
}
}
@@ -3538,6 +3542,7 @@ static void do_vm_stop(int reason)
if (vm_running) {
cpu_disable_ticks();
vm_running = 0;
+ pause_all_vcpus();
vm_state_notify(0, reason);
}
}
@@ -3590,18 +3595,13 @@ void qemu_system_powerdown_request(void)
qemu_notify_event();
}
-void qemu_notify_event(void)
+#ifdef CONFIG_IOTHREAD
+static void qemu_system_vmstop_request(int reason)
{
- CPUState *env = cpu_single_env;
-
- if (env) {
- cpu_exit(env);
-#ifdef USE_KQEMU
- if (env->kqemu_enabled)
- kqemu_cpu_interrupt(env);
-#endif
- }
+ vmstop_requested = reason;
+ qemu_notify_event();
}
+#endif
#ifdef CONFIG_IOTHREAD
#ifndef _WIN32
@@ -3679,6 +3679,17 @@ static void qemu_event_increment(void)
#endif
#endif
+
+static int cpu_can_run(CPUState *env)
+{
+ if (env->stop)
+ return 0;
+ if (env->stopped)
+ return 0;
+ return 1;
+}
+
+#ifndef CONFIG_IOTHREAD
void qemu_init_vcpu(void *_env)
{
CPUState *env = _env;
@@ -3698,10 +3709,31 @@ void qemu_cpu_kick(void *env)
return;
}
+static void resume_all_vcpus(void)
+{
+}
+
+static void pause_all_vcpus(void)
+{
+}
+
static void qemu_init_main_loop(void)
{
}
+void qemu_notify_event(void)
+{
+ CPUState *env = cpu_single_env;
+
+ if (env) {
+ cpu_exit(env);
+#ifdef USE_KQEMU
+ if (env->kqemu_enabled)
+ kqemu_cpu_interrupt(env);
+#endif
+ }
+}
+
#define qemu_mutex_lock_iothread() do { } while (0)
#define qemu_mutex_unlock_iothread() do { } while (0)
@@ -3710,6 +3742,315 @@ void vm_stop(int reason)
do_vm_stop(reason);
}
+#else /* CONFIG_IOTHREAD */
+
+#include "qemu-thread.h"
+
+QemuMutex qemu_global_mutex;
+static QemuMutex qemu_fair_mutex;
+
+static QemuThread io_thread;
+
+static QemuThread *tcg_cpu_thread;
+static QemuCond *tcg_halt_cond;
+
+static int qemu_system_ready;
+/* cpu creation */
+static QemuCond qemu_cpu_cond;
+/* system init */
+static QemuCond qemu_system_cond;
+static QemuCond qemu_pause_cond;
+
+static void block_io_signals(void);
+static void unblock_io_signals(void);
+static int tcg_has_work(void);
+
+static void qemu_init_main_loop(void)
+{
+ qemu_cond_init(&qemu_pause_cond);
+ qemu_mutex_init(&qemu_fair_mutex);
+ qemu_mutex_init(&qemu_global_mutex);
+ qemu_mutex_lock(&qemu_global_mutex);
+
+ qemu_event_init();
+
+ unblock_io_signals();
+ qemu_thread_self(&io_thread);
+}
+
+static void qemu_wait_io_event(CPUState *env)
+{
+ while (!tcg_has_work())
+ qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
+
+ qemu_mutex_unlock(&qemu_global_mutex);
+
+ /*
+ * Users of qemu_global_mutex can be starved, having no chance
+ * to acquire it since this path will get to it first.
+ * So use another lock to provide fairness.
+ */
+ qemu_mutex_lock(&qemu_fair_mutex);
+ qemu_mutex_unlock(&qemu_fair_mutex);
+
+ qemu_mutex_lock(&qemu_global_mutex);
+ if (env->stop) {
+ env->stop = 0;
+ env->stopped = 1;
+ qemu_cond_signal(&qemu_pause_cond);
+ }
+}
+
+static int qemu_cpu_exec(CPUState *env);
+
+static void *kvm_cpu_thread_fn(void *arg)
+{
+ CPUState *env = arg;
+
+ block_io_signals();
+ qemu_thread_self(env->thread);
+
+ /* signal CPU creation */
+ qemu_mutex_lock(&qemu_global_mutex);
+ env->created = 1;
+ qemu_cond_signal(&qemu_cpu_cond);
+
+ /* and wait for machine initialization */
+ while (!qemu_system_ready)
+ qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
+
+ while (1) {
+ if (cpu_can_run(env))
+ qemu_cpu_exec(env);
+ qemu_wait_io_event(env);
+ }
+
+ return NULL;
+}
+
+static void tcg_cpu_exec(void);
+
+static void *tcg_cpu_thread_fn(void *arg)
+{
+ CPUState *env = arg;
+
+ block_io_signals();
+ qemu_thread_self(env->thread);
+
+ /* signal CPU creation */
+ qemu_mutex_lock(&qemu_global_mutex);
+ for (env = first_cpu; env != NULL; env = env->next_cpu)
+ env->created = 1;
+ qemu_cond_signal(&qemu_cpu_cond);
+
+ /* and wait for machine initialization */
+ while (!qemu_system_ready)
+ qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
+
+ while (1) {
+ tcg_cpu_exec();
+ qemu_wait_io_event(cur_cpu);
+ }
+
+ return NULL;
+}
+
+void qemu_cpu_kick(void *_env)
+{
+ CPUState *env = _env;
+ qemu_cond_broadcast(env->halt_cond);
+ if (kvm_enabled())
+ qemu_thread_signal(env->thread, SIGUSR1);
+}
+
+int qemu_cpu_self(void *env)
+{
+ return (cpu_single_env != NULL);
+}
+
+static void cpu_signal(int sig)
+{
+ if (cpu_single_env)
+ cpu_exit(cpu_single_env);
+}
+
+static void block_io_signals(void)
+{
+ sigset_t set;
+ struct sigaction sigact;
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR2);
+ sigaddset(&set, SIGIO);
+ sigaddset(&set, SIGALRM);
+ pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR1);
+ pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = cpu_signal;
+ sigaction(SIGUSR1, &sigact, NULL);
+}
+
+static void unblock_io_signals(void)
+{
+ sigset_t set;
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR2);
+ sigaddset(&set, SIGIO);
+ sigaddset(&set, SIGALRM);
+ pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR1);
+ pthread_sigmask(SIG_BLOCK, &set, NULL);
+}
+
+static void qemu_signal_lock(unsigned int msecs)
+{
+ qemu_mutex_lock(&qemu_fair_mutex);
+
+ while (qemu_mutex_trylock(&qemu_global_mutex)) {
+ qemu_thread_signal(tcg_cpu_thread, SIGUSR1);
+ if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
+ break;
+ }
+ qemu_mutex_unlock(&qemu_fair_mutex);
+}
+
+static void qemu_mutex_lock_iothread(void)
+{
+ if (kvm_enabled()) {
+ qemu_mutex_lock(&qemu_fair_mutex);
+ qemu_mutex_lock(&qemu_global_mutex);
+ qemu_mutex_unlock(&qemu_fair_mutex);
+ } else
+ qemu_signal_lock(100);
+}
+
+static void qemu_mutex_unlock_iothread(void)
+{
+ qemu_mutex_unlock(&qemu_global_mutex);
+}
+
+static int all_vcpus_paused(void)
+{
+ CPUState *penv = first_cpu;
+
+ while (penv) {
+ if (!penv->stopped)
+ return 0;
+ penv = (CPUState *)penv->next_cpu;
+ }
+
+ return 1;
+}
+
+static void pause_all_vcpus(void)
+{
+ CPUState *penv = first_cpu;
+
+ while (penv) {
+ penv->stop = 1;
+ qemu_thread_signal(penv->thread, SIGUSR1);
+ qemu_cpu_kick(penv);
+ penv = (CPUState *)penv->next_cpu;
+ }
+
+ while (!all_vcpus_paused()) {
+ qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
+ penv = first_cpu;
+ while (penv) {
+ qemu_thread_signal(penv->thread, SIGUSR1);
+ penv = (CPUState *)penv->next_cpu;
+ }
+ }
+}
+
+static void resume_all_vcpus(void)
+{
+ CPUState *penv = first_cpu;
+
+ while (penv) {
+ penv->stop = 0;
+ penv->stopped = 0;
+ qemu_thread_signal(penv->thread, SIGUSR1);
+ qemu_cpu_kick(penv);
+ penv = (CPUState *)penv->next_cpu;
+ }
+}
+
+static void tcg_init_vcpu(void *_env)
+{
+ CPUState *env = _env;
+ /* share a single thread for all cpus with TCG */
+ if (!tcg_cpu_thread) {
+ env->thread = qemu_mallocz(sizeof(QemuThread));
+ env->halt_cond = qemu_mallocz(sizeof(QemuCond));
+ qemu_cond_init(env->halt_cond);
+ qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
+ while (env->created == 0)
+ qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
+ tcg_cpu_thread = env->thread;
+ tcg_halt_cond = env->halt_cond;
+ } else {
+ env->thread = tcg_cpu_thread;
+ env->halt_cond = tcg_halt_cond;
+ }
+}
+
+static void kvm_start_vcpu(CPUState *env)
+{
+ kvm_init_vcpu(env);
+ env->thread = qemu_mallocz(sizeof(QemuThread));
+ env->halt_cond = qemu_mallocz(sizeof(QemuCond));
+ qemu_cond_init(env->halt_cond);
+ qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
+ while (env->created == 0)
+ qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
+}
+
+void qemu_init_vcpu(void *_env)
+{
+ CPUState *env = _env;
+
+ if (kvm_enabled())
+ kvm_start_vcpu(env);
+ else
+ tcg_init_vcpu(env);
+}
+
+void qemu_notify_event(void)
+{
+ qemu_event_increment();
+}
+
+void vm_stop(int reason)
+{
+ QemuThread me;
+ qemu_thread_self(&me);
+
+ if (!qemu_thread_equal(&me, &io_thread)) {
+ qemu_system_vmstop_request(reason);
+ /*
+ * FIXME: should not return to device code in case
+ * vm_stop() has been requested.
+ */
+ if (cpu_single_env) {
+ cpu_exit(cpu_single_env);
+ cpu_single_env->stop = 1;
+ }
+ return;
+ }
+ do_vm_stop(reason);
+}
+
+#endif
+
+
#ifdef _WIN32
static void host_main_loop_wait(int *timeout)
{
@@ -3846,9 +4187,11 @@ void main_loop_wait(int timeout)
}
/* vm time timers */
- if (vm_running && likely(!(cur_cpu->singlestep_enabled & SSTEP_NOTIMER)))
- qemu_run_timers(&active_timers[QEMU_TIMER_VIRTUAL],
- qemu_get_clock(vm_clock));
+ if (vm_running) {
+ if (!cur_cpu || likely(!(cur_cpu->singlestep_enabled & SSTEP_NOTIMER)))
+ qemu_run_timers(&active_timers[QEMU_TIMER_VIRTUAL],
+ qemu_get_clock(vm_clock));
+ }
/* real time timers */
qemu_run_timers(&active_timers[QEMU_TIMER_REALTIME],
@@ -3902,7 +4245,7 @@ static int qemu_cpu_exec(CPUState *env)
static void tcg_cpu_exec(void)
{
- int ret;
+ int ret = 0;
if (next_cpu == NULL)
next_cpu = first_cpu;
@@ -3915,7 +4258,8 @@ static void tcg_cpu_exec(void)
timer_alarm_pending = 0;
break;
}
- ret = qemu_cpu_exec(env);
+ if (cpu_can_run(env))
+ ret = qemu_cpu_exec(env);
if (ret == EXCP_DEBUG) {
gdb_set_stop_cpu(env);
debug_requested = 1;
@@ -3926,6 +4270,10 @@ static void tcg_cpu_exec(void)
static int cpu_has_work(CPUState *env)
{
+ if (env->stop)
+ return 1;
+ if (env->stopped)
+ return 0;
if (!env->halted)
return 1;
if (qemu_cpu_has_work(env))
@@ -4009,16 +4357,27 @@ static void main_loop(void)
{
int r;
+#ifdef CONFIG_IOTHREAD
+ qemu_system_ready = 1;
+ qemu_cond_broadcast(&qemu_system_cond);
+#endif
+
for (;;) {
do {
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
+#ifndef CONFIG_IOTHREAD
tcg_cpu_exec();
+#endif
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
+#ifdef CONFIG_IOTHREAD
+ main_loop_wait(1000);
+#else
main_loop_wait(qemu_calculate_timeout());
+#endif
#ifdef CONFIG_PROFILER
dev_time += profile_getclock() - ti;
#endif
@@ -4033,13 +4392,17 @@ static void main_loop(void)
} else
break;
}
- if (qemu_reset_requested())
+ if (qemu_reset_requested()) {
+ pause_all_vcpus();
qemu_system_reset();
+ resume_all_vcpus();
+ }
if (qemu_powerdown_requested())
qemu_system_powerdown();
if ((r = qemu_vmstop_requested()))
vm_stop(r);
}
+ pause_all_vcpus();
}
static void version(void)
Index: qemu-iothread-4/cpu-defs.h
===================================================================
--- qemu-iothread-4.orig/cpu-defs.h
+++ qemu-iothread-4/cpu-defs.h
@@ -170,6 +170,8 @@ typedef struct CPUWatchpoint {
target_ulong mem_io_vaddr; /* target virtual addr at which the \
memory was accessed */ \
uint32_t halted; /* Nonzero if the CPU is in suspend state */ \
+ uint32_t stop; /* Stop request */ \
+ uint32_t stopped; /* Artificially stopped */ \
uint32_t interrupt_request; \
volatile sig_atomic_t exit_request; \
/* The meaning of the MMU modes is defined in the target code. */ \
@@ -209,6 +211,9 @@ typedef struct CPUWatchpoint {
/* user data */ \
void *opaque; \
\
+ uint32_t created; \
+ struct QemuThread *thread; \
+ struct QemuCond *halt_cond; \
const char *cpu_model_str; \
struct KVMState *kvm_state; \
struct kvm_run *kvm_run; \
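
For completeness, the SIGUSR1 kick behind qemu_cpu_kick() and
pause_all_vcpus() can be demonstrated standalone. This assumes
qemu_thread_signal() amounts to pthread_kill(), as the qemu-thread
wrappers earlier in the series suggest; the names are illustrative, and
where this demo merely sets a flag, the patch's cpu_signal() handler
calls cpu_exit() on cpu_single_env:

/* kick.c: hypothetical demo, not QEMU code */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t exit_request;

static void kick_handler(int sig)
{
    exit_request = 1;   /* the patch's cpu_signal() calls cpu_exit() */
}

static void *vcpu_thread(void *arg)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));
    act.sa_handler = kick_handler;
    sigaction(SIGUSR1, &act, NULL);

    while (!exit_request)
        ;                       /* stand-in for running guest code */
    printf("vcpu kicked out of guest mode\n");
    return NULL;
}

int main(void)
{
    pthread_t vcpu;

    pthread_create(&vcpu, NULL, vcpu_thread, NULL);
    sleep(1);                        /* let the vcpu start spinning */
    pthread_kill(vcpu, SIGUSR1);     /* qemu_thread_signal(), roughly */
    pthread_join(vcpu, NULL);
    return 0;
}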
--