From: Marcelo Tosatti <mtosatti@redhat.com>
To: qemu-devel@nongnu.org
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Subject: [Qemu-devel] [patch 05/11] qemu: separate thread for io
Date: Thu, 02 Apr 2009 20:32:55 -0300
Message-ID: <20090402233746.094469683@localhost.localdomain>
In-Reply-To: <20090402233250.577870188@localhost.localdomain>
Introduce a separate thread to handle host I/O events. The original thread becomes the I/O thread and runs the select() loop in main_loop_wait(), while guest CPU execution moves to a new thread (cpu_main_loop). The two threads synchronize on qemu_global_mutex, with qemu_fair_mutex providing a hand-off so the I/O thread is not starved of the global lock. SIGIO, SIGALRM and SIGUSR2 are delivered only to the I/O thread; SIGUSR1 is used to kick the CPU thread out of guest code execution.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
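
For readers unfamiliar with the two-mutex hand-off used by qemu_signal_lock()
and qemu_wait_io_event() below, here is a minimal standalone sketch of the
idea, written directly against pthreads rather than the qemu-thread wrappers
from patch 02/11. The names global_mutex, fair_mutex, cpu_loop and io_acquire
are illustrative only, not QEMU code. The "CPU" thread holds the global lock
while it runs, but on every release it also passes through the fair lock, so
an "I/O" thread that grabbed the fair lock first gets the global lock instead
of being starved:

/* build with: cc -pthread fair_handoff.c -o fair_handoff */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER; /* protects shared state */
static pthread_mutex_t fair_mutex   = PTHREAD_MUTEX_INITIALIZER; /* serializes the hand-off */

/* "CPU" thread: holds global_mutex most of the time, but on every release
 * also passes through fair_mutex, so a waiter holding fair_mutex acquires
 * global_mutex before we can take it back. */
static void *cpu_loop(void *arg)
{
    int i;

    (void)arg;
    for (i = 0; i < 5; i++) {
        pthread_mutex_lock(&global_mutex);
        usleep(100 * 1000);                  /* pretend to run guest code */
        pthread_mutex_unlock(&global_mutex);

        pthread_mutex_lock(&fair_mutex);     /* hand-off point */
        pthread_mutex_unlock(&fair_mutex);
    }
    return NULL;
}

/* "I/O" thread: take fair_mutex first, then global_mutex; holding fair_mutex
 * keeps the CPU thread from looping straight back into global_mutex. */
static void io_acquire(void)
{
    pthread_mutex_lock(&fair_mutex);
    pthread_mutex_lock(&global_mutex);
    pthread_mutex_unlock(&fair_mutex);
}

int main(void)
{
    pthread_t cpu;
    int i;

    pthread_create(&cpu, NULL, cpu_loop, NULL);
    for (i = 0; i < 5; i++) {
        io_acquire();
        printf("io thread acquired global_mutex\n");
        pthread_mutex_unlock(&global_mutex);
        usleep(50 * 1000);
    }
    pthread_join(cpu, NULL);
    return 0;
}

In the patch itself the I/O thread additionally sends SIGUSR1 via
qemu_thread_signal() while it spins on the global lock, so the CPU thread
leaves translated code and reaches its hand-off point promptly.
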
Index: trunk/qemu-common.h
===================================================================
--- trunk.orig/qemu-common.h
+++ trunk/qemu-common.h
@@ -191,6 +191,10 @@ void main_loop_break(void);
/* Force QEMU to process pending events */
void qemu_notify_event(void);
+/* Unblock cpu */
+void qemu_cpu_kick(void *env);
+int qemu_cpu_self(void *env);
+
typedef struct QEMUIOVector {
struct iovec *iov;
int niov;
Index: trunk/vl.c
===================================================================
--- trunk.orig/vl.c
+++ trunk/vl.c
@@ -146,6 +146,7 @@ int main(int argc, char **argv)
#include "gdbstub.h"
#include "qemu-timer.h"
#include "qemu-char.h"
+#include "qemu-thread.h"
#include "cache-utils.h"
#include "block.h"
#include "dma.h"
@@ -278,6 +279,13 @@ uint8_t qemu_uuid[16];
static int io_thread_fd = -1;
+QemuMutex qemu_global_mutex;
+QemuMutex qemu_fair_mutex;
+
+QemuThread io_thread;
+QemuThread cpus_thread;
+QemuCond halt_cond;
+
/***********************************************************/
/* x86 ISA bus support */
@@ -1347,8 +1355,6 @@ static void host_alarm_handler(int host_
write(alarm_timer_wfd, &byte, sizeof(byte));
#endif
alarm_timer->flags |= ALARM_FLAG_EXPIRED;
-
- qemu_notify_event();
}
}
@@ -2957,6 +2963,7 @@ int qemu_set_fd_handler2(int fd,
ioh->opaque = opaque;
ioh->deleted = 0;
}
+ main_loop_break();
return 0;
}
@@ -3324,7 +3331,6 @@ static int ram_load(QEMUFile *f, void *o
void qemu_service_io(void)
{
- qemu_notify_event();
}
/***********************************************************/
@@ -3397,7 +3403,7 @@ void qemu_bh_schedule(QEMUBH *bh)
bh->scheduled = 1;
bh->idle = 0;
/* stop the currently executing CPU to execute the BH ASAP */
- qemu_notify_event();
+ main_loop_break();
}
void qemu_bh_cancel(QEMUBH *bh)
@@ -3606,32 +3612,24 @@ void qemu_system_reset_request(void)
} else {
reset_requested = 1;
}
- qemu_notify_event();
+ main_loop_break();
}
void qemu_system_shutdown_request(void)
{
shutdown_requested = 1;
- qemu_notify_event();
+ main_loop_break();
}
void qemu_system_powerdown_request(void)
{
powerdown_requested = 1;
- qemu_notify_event();
+ main_loop_break();
}
void qemu_notify_event(void)
{
- CPUState *env = cpu_single_env;
-
- if (env) {
- cpu_exit(env);
-#ifdef USE_KQEMU
- if (env->kqemu_enabled)
- kqemu_cpu_interrupt(env);
-#endif
- }
+ main_loop_break();
}
void main_loop_break(void)
@@ -3733,6 +3731,105 @@ static void host_main_loop_wait(int *tim
}
#endif
+static int cpu_has_work(CPUState *env)
+{
+ if (!env->halted)
+ return 1;
+ if (qemu_cpu_has_work(env))
+ return 1;
+ return 0;
+}
+
+static int tcg_has_work(CPUState *env)
+{
+ for (env = first_cpu; env != NULL; env = env->next_cpu)
+ if (cpu_has_work(env))
+ return 1;
+ return 0;
+}
+
+static void qemu_wait_io_event(CPUState *env, int timeout)
+{
+ if (timeout)
+ while (!tcg_has_work(env))
+ qemu_cond_timedwait(&halt_cond, &qemu_global_mutex, timeout);
+
+ qemu_mutex_unlock(&qemu_global_mutex);
+
+ /*
+ * Users of qemu_global_mutex can be starved, having no chance
+ * to acquire it since this path will get to it first.
+ * So use another lock to provide fairness.
+ */
+ qemu_mutex_lock(&qemu_fair_mutex);
+ qemu_mutex_unlock(&qemu_fair_mutex);
+
+ qemu_mutex_lock(&qemu_global_mutex);
+}
+
+void qemu_cpu_kick(void *env)
+{
+ qemu_cond_broadcast(&halt_cond);
+}
+
+int qemu_cpu_self(void *env)
+{
+ return (cpu_single_env != NULL);
+}
+
+static void cpu_signal(int sig)
+{
+ if (cpu_single_env)
+ cpu_exit(cpu_single_env);
+}
+
+static void block_io_signals(void)
+{
+ sigset_t set;
+ struct sigaction sigact;
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR2);
+ sigaddset(&set, SIGIO);
+ sigaddset(&set, SIGALRM);
+ pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR1);
+ pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = cpu_signal;
+ sigaction(SIGUSR1, &sigact, NULL);
+}
+
+static void unblock_io_signals(void)
+{
+ sigset_t set;
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR2);
+ sigaddset(&set, SIGIO);
+ sigaddset(&set, SIGALRM);
+ pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR1);
+ pthread_sigmask(SIG_BLOCK, &set, NULL);
+}
+
+static void qemu_signal_lock(unsigned int msecs)
+{
+ qemu_mutex_lock(&qemu_fair_mutex);
+
+ while (qemu_mutex_trylock(&qemu_global_mutex)) {
+ qemu_thread_signal(&cpus_thread, SIGUSR1);
+ if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
+ break;
+ }
+ qemu_mutex_unlock(&qemu_fair_mutex);
+}
+
void main_loop_wait(int timeout)
{
IOHandlerRecord *ioh;
@@ -3775,7 +3872,14 @@ void main_loop_wait(int timeout)
slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
}
#endif
+
+ /*
+ * main_loop_wait() *must* not assume any global state is consistent across
+ * select() invocations.
+ */
+ qemu_mutex_unlock(&qemu_global_mutex);
ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
+ qemu_signal_lock(100);
if (ret > 0) {
IOHandlerRecord **pioh;
@@ -3811,9 +3915,11 @@ void main_loop_wait(int timeout)
#endif
/* vm time timers */
- if (vm_running && likely(!(cur_cpu->singlestep_enabled & SSTEP_NOTIMER)))
- qemu_run_timers(&active_timers[QEMU_TIMER_VIRTUAL],
- qemu_get_clock(vm_clock));
+ if (vm_running) {
+ if (cur_cpu && likely(!(cur_cpu->singlestep_enabled & SSTEP_NOTIMER)))
+ qemu_run_timers(&active_timers[QEMU_TIMER_VIRTUAL],
+ qemu_get_clock(vm_clock));
+ }
/* real time timers */
qemu_run_timers(&active_timers[QEMU_TIMER_REALTIME],
@@ -3837,9 +3943,10 @@ static void setup_iothread_fd(void)
qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
(void *)(unsigned long)fds[0]);
io_thread_fd = fds[1];
+ fcntl(io_thread_fd, F_SETFL, O_NONBLOCK);
}
-static int main_loop(void)
+static void *cpu_main_loop(void *arg)
{
int ret, timeout;
#ifdef CONFIG_PROFILER
@@ -3847,7 +3954,12 @@ static int main_loop(void)
#endif
CPUState *env;
- cur_cpu = first_cpu;
+ block_io_signals();
+ qemu_thread_self(&cpus_thread);
+
+ qemu_mutex_lock(&qemu_global_mutex);
+
+ cur_cpu = env = first_cpu;
next_cpu = cur_cpu->next_cpu ?: first_cpu;
for(;;) {
if (vm_running) {
@@ -3970,6 +4082,7 @@ static int main_loop(void)
timeout = 0;
}
} else {
+ env = env->next_cpu ?: first_cpu;
if (shutdown_requested) {
ret = EXCP_INTERRUPT;
break;
@@ -3979,13 +4092,31 @@ static int main_loop(void)
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
- main_loop_wait(timeout);
+ qemu_wait_io_event(env, timeout);
#ifdef CONFIG_PROFILER
dev_time += profile_getclock() - ti;
#endif
}
cpu_disable_ticks();
- return ret;
+ return NULL;
+}
+
+static void main_loop(void)
+{
+ qemu_cond_init(&halt_cond);
+ qemu_mutex_init(&qemu_fair_mutex);
+ qemu_mutex_init(&qemu_global_mutex);
+ qemu_mutex_lock(&qemu_global_mutex);
+
+ qemu_thread_self(&io_thread);
+ setup_iothread_fd();
+
+ unblock_io_signals();
+
+ qemu_thread_create(&cpus_thread, cpu_main_loop, NULL);
+
+ while (1)
+ main_loop_wait(1000);
}
static void help(int exitcode)
Index: trunk/exec.c
===================================================================
--- trunk.orig/exec.c
+++ trunk/exec.c
@@ -1532,6 +1532,13 @@ void cpu_interrupt(CPUState *env, int ma
old_mask = env->interrupt_request;
env->interrupt_request |= mask;
+#ifndef CONFIG_USER_ONLY
+ if (!qemu_cpu_self(env)) {
+ qemu_cpu_kick(env);
+ return;
+ }
+#endif
+
if (use_icount) {
env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
--