From: Marcelo Tosatti <mtosatti@redhat.com>
To: qemu-devel@nongnu.org
Cc: Anthony Liguori <aliguori@us.ibm.com>
Subject: [Qemu-devel] [patch 2/2] qemu: separate thread for io
Date: Wed, 11 Mar 2009 13:16:47 -0300 [thread overview]
Message-ID: <20090311161942.587584232@localhost.localdomain> (raw)
In-Reply-To: 20090311161645.344003675@localhost.localdomain
[-- Attachment #1: introduce-iothread --]
[-- Type: text/plain, Size: 6781 bytes --]
Index: qemu/exec.c
===================================================================
--- qemu.orig/exec.c
+++ qemu/exec.c
@@ -1513,6 +1513,20 @@ void cpu_interrupt(CPUState *env, int ma
/* FIXME: This is probably not threadsafe. A different thread could
be in the middle of a read-modify-write operation. */
env->interrupt_request |= mask;
+
+ switch(mask) {
+ case CPU_INTERRUPT_HARD:
+ case CPU_INTERRUPT_SMI:
+ case CPU_INTERRUPT_NMI:
+ case CPU_INTERRUPT_EXIT:
+ /*
+ * Only unlink the TBs if we're called from CPU-thread context;
+ * otherwise signal the CPU thread to do it.
+ */
+ if (qemu_notify_event(env))
+ return;
+ }
+
#if defined(USE_NPTL)
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the
problem and hope the cpu will stop of its own accord. For userspace
Index: qemu/qemu-common.h
===================================================================
--- qemu.orig/qemu-common.h
+++ qemu/qemu-common.h
@@ -190,6 +190,8 @@ int cpu_load(QEMUFile *f, void *opaque,
/* Force QEMU to stop what it's doing and service IO */
void qemu_service_io(void);
+void main_loop_break(void);
+int qemu_notify_event(void *env);
typedef struct QEMUIOVector {
struct iovec *iov;
Index: qemu/vl.c
===================================================================
--- qemu.orig/vl.c
+++ qemu/vl.c
@@ -265,6 +265,8 @@ static QEMUTimer *nographic_timer;
uint8_t qemu_uuid[16];
QemuSem qemu_sem;
+QemuThread qemu_io_thread;
+QemuThread cpus_thread;
/***********************************************************/
/* x86 ISA bus support */
@@ -1328,7 +1330,6 @@ static void host_alarm_handler(int host_
qemu_get_clock(vm_clock))) ||
qemu_timer_expired(active_timers[QEMU_TIMER_REALTIME],
qemu_get_clock(rt_clock))) {
- CPUState *env = next_cpu;
#ifdef _WIN32
struct qemu_alarm_win32 *data = ((struct qemu_alarm_timer*)dwUser)->priv;
@@ -1339,15 +1340,6 @@ static void host_alarm_handler(int host_
#endif
alarm_timer->flags |= ALARM_FLAG_EXPIRED;
- if (env) {
- /* stop the currently executing cpu because a timer occured */
- cpu_interrupt(env, CPU_INTERRUPT_EXIT);
-#ifdef USE_KQEMU
- if (env->kqemu_enabled) {
- kqemu_cpu_interrupt(env);
- }
-#endif
- }
event_pending = 1;
}
}
@@ -2878,6 +2870,7 @@ int qemu_set_fd_handler2(int fd,
ioh->opaque = opaque;
ioh->deleted = 0;
}
+ main_loop_break();
return 0;
}
@@ -3334,6 +3327,7 @@ void qemu_bh_schedule(QEMUBH *bh)
if (env) {
cpu_interrupt(env, CPU_INTERRUPT_EXIT);
}
+ main_loop_break();
}
void qemu_bh_cancel(QEMUBH *bh)
@@ -3611,6 +3605,138 @@ static void host_main_loop_wait(int *tim
}
#endif
+static int wait_signal(int timeout)
+{
+ struct timespec ts;
+ sigset_t waitset;
+
+ if (!timeout)
+ timeout = 1;
+
+ ts.tv_sec = timeout / 1000;
+ ts.tv_nsec = (timeout % 1000) * 1000000;
+ sigemptyset(&waitset);
+ sigaddset(&waitset, SIGUSR1);
+
+ return sigtimedwait(&waitset, NULL, &ts);
+}
+
+static int has_work(CPUState *env)
+{
+ int r = 0;
+ if (!env->halted)
+ r = 1;
+ if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI))
+ r = 1;
+ return r;
+}
+
+static void qemu_wait_io_event(CPUState *env, int timeout)
+{
+ qemu_sem_unlock(&qemu_sem);
+
+ if (!has_work(env))
+ wait_signal(timeout);
+ /*
+ * FIXME: sem_post only wakes up the waiting thread; there is no
+ * guarantee it has acquired the semaphore. Needs proper synchronization,
+ * perhaps with a pthread condition variable, instead of usleep(1).
+ */
+ else
+ usleep(1);
+
+ qemu_sem_lock(&qemu_sem);
+}
+
+static void cpu_signal(int sig)
+{
+ QemuThread self;
+ CPUState *env = cpu_single_env;
+
+ event_pending = 1;
+
+ qemu_thread_self(&self);
+ if (!qemu_thread_equal(&self, &cpus_thread))
+ return;
+
+ if (env) {
+ /* stop the currently executing cpu because an event occurred */
+ cpu_interrupt(env, CPU_INTERRUPT_EXIT);
+#ifdef USE_KQEMU
+ if (env->kqemu_enabled) {
+ kqemu_cpu_interrupt(env);
+ }
+#endif
+ }
+}
+
+static void block_io_signals(void)
+{
+ sigset_t set;
+ struct sigaction sigact;
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR2);
+ sigaddset(&set, SIGIO);
+ sigaddset(&set, SIGALRM);
+ pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR1);
+ pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = cpu_signal;
+ sigaction(SIGUSR1, &sigact, NULL);
+}
+
+static void unblock_io_signals(void)
+{
+ sigset_t set;
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR2);
+ sigaddset(&set, SIGIO);
+ sigaddset(&set, SIGALRM);
+ pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR1);
+ pthread_sigmask(SIG_BLOCK, &set, NULL);
+}
+
+int qemu_notify_event(void *cpu_env)
+{
+ QemuThread me;
+ qemu_thread_self(&me);
+
+ if (qemu_thread_equal(&cpus_thread, &me))
+ return 0;
+ qemu_thread_signal(&cpus_thread, SIGUSR1);
+ return 1;
+}
+
+void main_loop_break(void)
+{
+}
+
+static void *io_thread_fn(void *arg)
+{
+ unblock_io_signals();
+ qemu_sem_lock(&qemu_sem);
+ while (1)
+ main_loop_wait(1000);
+}
+
+static void qemu_signal_lock(QemuSem *sem, unsigned int msecs)
+{
+ while (qemu_sem_trylock(sem)) {
+ qemu_thread_signal(&cpus_thread, SIGUSR1);
+ if (!qemu_sem_timedlock(&qemu_sem, msecs))
+ break;
+ }
+}
+
void main_loop_wait(int timeout)
{
IOHandlerRecord *ioh;
@@ -3656,7 +3782,7 @@ void main_loop_wait(int timeout)
qemu_sem_unlock(&qemu_sem);
ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
- qemu_sem_lock(&qemu_sem);
+ qemu_signal_lock(&qemu_sem, 100);
if (ret > 0) {
IOHandlerRecord **pioh;
@@ -3717,6 +3843,11 @@ static int main_loop(void)
qemu_sem_init(&qemu_sem);
qemu_sem_lock(&qemu_sem);
+ qemu_thread_create(&qemu_io_thread, io_thread_fn, NULL);
+ block_io_signals();
+
+ qemu_thread_self(&cpus_thread);
+
cur_cpu = first_cpu;
next_cpu = cur_cpu->next_cpu ?: first_cpu;
for(;;) {
@@ -3849,7 +3980,7 @@ static int main_loop(void)
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
- main_loop_wait(timeout);
+ qemu_wait_io_event(env, timeout);
#ifdef CONFIG_PROFILER
dev_time += profile_getclock() - ti;
#endif
--
prev parent reply other threads:[~2009-03-11 16:21 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2009-03-11 16:16 [Qemu-devel] [patch 0/2] RFC: separate thread for IO Marcelo Tosatti
2009-03-11 16:16 ` [Qemu-devel] [patch 1/2] qemu: sem/thread helpers Marcelo Tosatti
2009-03-11 16:33 ` [Qemu-devel] " Anthony Liguori
2009-03-11 16:48 ` [Qemu-devel] " Paul Brook
2009-03-11 16:56 ` Anthony Liguori
2009-03-11 16:58 ` Marcelo Tosatti
2009-03-18 18:47 ` Marcelo Tosatti
2009-03-15 14:15 ` Avi Kivity
2009-03-17 17:42 ` Marcelo Tosatti
2009-03-17 23:07 ` Paul Brook
2009-03-17 23:43 ` Marcelo Tosatti
2009-03-11 16:16 ` Marcelo Tosatti [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20090311161942.587584232@localhost.localdomain \
--to=mtosatti@redhat.com \
--cc=aliguori@us.ibm.com \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).