qemu-devel.nongnu.org archive mirror
* [Qemu-devel] [patch 00/10] iothread (candidate for inclusion)
@ 2009-03-25 22:47 Marcelo Tosatti
  2009-03-25 22:47 ` [Qemu-devel] [patch 01/10] qemu: create helper for event notification Marcelo Tosatti
                   ` (10 more replies)
  0 siblings, 11 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 22:47 UTC (permalink / raw)
  To: qemu-devel

This version is simplified thanks to the removal of CPU_INTERRUPT_EXIT.

There are two pending issues: vm_stop() from ENOSPC handling and GDB contexts.
IMO those can be worked on while testing is performed by developers.

KVM SMP support is missing, but most of the required infrastructure is
present, so it should be relatively simple to add.

There's also lots of room for TCG perf improvements.

Tested with target-x86_64 and target-ppc.


* [Qemu-devel] [patch 01/10] qemu: create helper for event notification
  2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
@ 2009-03-25 22:47 ` Marcelo Tosatti
  2009-03-25 23:18   ` Glauber Costa
  2009-03-25 22:47 ` [Qemu-devel] [patch 02/10] qemu: mutex/thread/cond wrappers Marcelo Tosatti
                   ` (9 subsequent siblings)
  10 siblings, 1 reply; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 22:47 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

[-- Attachment #1: abstract-qemu-event --]
[-- Type: text/plain, Size: 4209 bytes --]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Index: trunk/hw/dma.c
===================================================================
--- trunk.orig/hw/dma.c
+++ trunk/hw/dma.c
@@ -445,9 +445,7 @@ int DMA_write_memory (int nchan, void *b
 /* request the emulator to transfer a new DMA memory block ASAP */
 void DMA_schedule(int nchan)
 {
-    CPUState *env = cpu_single_env;
-    if (env)
-        cpu_exit(env);
+    qemu_notify_event();
 }
 
 static void dma_reset(void *opaque)
Index: trunk/vl.c
===================================================================
--- trunk.orig/vl.c
+++ trunk/vl.c
@@ -1182,9 +1182,8 @@ void qemu_mod_timer(QEMUTimer *ts, int64
             qemu_rearm_alarm_timer(alarm_timer);
         }
         /* Interrupt execution to force deadline recalculation.  */
-        if (use_icount && cpu_single_env) {
-            cpu_exit(cpu_single_env);
-        }
+        if (use_icount)
+            qemu_notify_event();
     }
 }
 
@@ -1337,8 +1336,6 @@ static void host_alarm_handler(int host_
                                qemu_get_clock(vm_clock))) ||
         qemu_timer_expired(active_timers[QEMU_TIMER_REALTIME],
                            qemu_get_clock(rt_clock))) {
-        CPUState *env = next_cpu;
-
 #ifdef _WIN32
         struct qemu_alarm_win32 *data = ((struct qemu_alarm_timer*)dwUser)->priv;
         SetEvent(data->host_alarm);
@@ -1348,16 +1345,7 @@ static void host_alarm_handler(int host_
 #endif
         alarm_timer->flags |= ALARM_FLAG_EXPIRED;
 
-        if (env) {
-            /* stop the currently executing cpu because a timer occured */
-            cpu_exit(env);
-#ifdef USE_KQEMU
-            if (env->kqemu_enabled) {
-                kqemu_cpu_interrupt(env);
-            }
-#endif
-        }
-        event_pending = 1;
+        qemu_notify_event();
     }
 }
 
@@ -3333,15 +3321,7 @@ static int ram_load(QEMUFile *f, void *o
 
 void qemu_service_io(void)
 {
-    CPUState *env = cpu_single_env;
-    if (env) {
-        cpu_exit(env);
-#ifdef USE_KQEMU
-        if (env->kqemu_enabled) {
-            kqemu_cpu_interrupt(env);
-        }
-#endif
-    }
+    qemu_notify_event();
 }
 
 /***********************************************************/
@@ -3409,15 +3389,12 @@ void qemu_bh_schedule_idle(QEMUBH *bh)
 
 void qemu_bh_schedule(QEMUBH *bh)
 {
-    CPUState *env = cpu_single_env;
     if (bh->scheduled)
         return;
     bh->scheduled = 1;
     bh->idle = 0;
     /* stop the currently executing CPU to execute the BH ASAP */
-    if (env) {
-        cpu_exit(env);
-    }
+    qemu_notify_event();
 }
 
 void qemu_bh_cancel(QEMUBH *bh)
@@ -3626,22 +3603,32 @@ void qemu_system_reset_request(void)
     } else {
         reset_requested = 1;
     }
-    if (cpu_single_env)
-        cpu_exit(cpu_single_env);
+    qemu_notify_event();
 }
 
 void qemu_system_shutdown_request(void)
 {
     shutdown_requested = 1;
-    if (cpu_single_env)
-        cpu_exit(cpu_single_env);
+    qemu_notify_event();
 }
 
 void qemu_system_powerdown_request(void)
 {
     powerdown_requested = 1;
-    if (cpu_single_env)
-        cpu_exit(cpu_single_env);
+    qemu_notify_event();
+}
+
+void qemu_notify_event(void)
+{
+    CPUState *env = cpu_single_env;
+
+    if (env) {
+        cpu_exit(env);
+#ifdef USE_KQEMU
+        if (env->kqemu_enabled)
+            kqemu_cpu_interrupt(env);
+#endif
+     }
 }
 
 #ifdef _WIN32
Index: trunk/qemu-common.h
===================================================================
--- trunk.orig/qemu-common.h
+++ trunk/qemu-common.h
@@ -186,6 +186,9 @@ int cpu_load(QEMUFile *f, void *opaque, 
 /* Force QEMU to stop what it's doing and service IO */
 void qemu_service_io(void);
 
+/* Force QEMU to process pending events */
+void qemu_notify_event(void);
+
 typedef struct QEMUIOVector {
     struct iovec *iov;
     int niov;
Index: trunk/hw/mac_dbdma.c
===================================================================
--- trunk.orig/hw/mac_dbdma.c
+++ trunk/hw/mac_dbdma.c
@@ -651,9 +651,7 @@ void DBDMA_register_channel(void *dbdma,
 
 void DBDMA_schedule(void)
 {
-    CPUState *env = cpu_single_env;
-    if (env)
-        cpu_exit(env);
+    qemu_notify_event();
 }
 
 static void
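
For reference, a minimal sketch (not part of the patch) of how callers are
expected to use the new helper; the device callback below is hypothetical:

#include "qemu-common.h"

/* Hypothetical device-model callback: instead of poking cpu_single_env
 * with cpu_exit() by hand, just ask QEMU to service pending events. */
static void my_device_io_complete(void *opaque)
{
    /* ... update device state visible to the guest ... */
    qemu_notify_event();
}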


* [Qemu-devel] [patch 02/10] qemu: mutex/thread/cond wrappers
  2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
  2009-03-25 22:47 ` [Qemu-devel] [patch 01/10] qemu: create helper for event notification Marcelo Tosatti
@ 2009-03-25 22:47 ` Marcelo Tosatti
  2009-03-25 23:24   ` Glauber Costa
  2009-03-26 11:01   ` malc
  2009-03-25 22:47 ` [Qemu-devel] [patch 03/10] qemu: per-arch cpu_has_work Marcelo Tosatti
                   ` (8 subsequent siblings)
  10 siblings, 2 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 22:47 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

[-- Attachment #1: iothread-mutex --]
[-- Type: text/plain, Size: 4790 bytes --]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Index: trunk/qemu-thread.c
===================================================================
--- /dev/null
+++ trunk/qemu-thread.c
@@ -0,0 +1,123 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <time.h>
+#include <signal.h>
+#include <stdint.h>
+#include "qemu-thread.h"
+
+static void error_exit(const char *msg)
+{
+    perror(msg);
+    exit(1);
+}
+
+void qemu_mutex_init(QemuMutex *mutex)
+{
+    if (pthread_mutex_init(&mutex->lock, NULL))
+        error_exit(__func__);
+}
+
+void qemu_mutex_lock(QemuMutex *mutex)
+{
+    if (pthread_mutex_lock(&mutex->lock))
+        error_exit(__func__);
+}
+
+int qemu_mutex_trylock(QemuMutex *mutex)
+{
+    return pthread_mutex_trylock(&mutex->lock);
+}
+
+static void timespec_add_ms(struct timespec *ts, uint64_t msecs)
+{
+    ts->tv_sec = ts->tv_sec + (long)(msecs / 1000);
+    ts->tv_nsec = (ts->tv_nsec + ((long)msecs % 1000) * 1000000);
+    if (ts->tv_nsec >= 1000000000) {
+        ts->tv_nsec -= 1000000000;
+        ts->tv_sec++;
+    }
+}
+
+int qemu_mutex_timedlock(QemuMutex *mutex, uint64_t msecs)
+{
+    int r;
+    struct timespec ts;
+
+    clock_gettime(CLOCK_REALTIME, &ts);
+    timespec_add_ms(&ts, msecs);
+
+    r = pthread_mutex_timedlock(&mutex->lock, &ts);
+    if (r && r != ETIMEDOUT)
+        error_exit(__func__);
+    return r;
+}
+
+void qemu_mutex_unlock(QemuMutex *mutex)
+{
+    if (pthread_mutex_unlock(&mutex->lock))
+        error_exit(__func__);
+}
+
+void qemu_cond_init(QemuCond *cond)
+{
+    if (pthread_cond_init(&cond->cond, NULL))
+        error_exit(__func__);
+}
+
+void qemu_cond_signal(QemuCond *cond)
+{
+    if (pthread_cond_signal(&cond->cond))
+        error_exit(__func__);
+}
+
+void qemu_cond_broadcast(QemuCond *cond)
+{
+    if (pthread_cond_broadcast(&cond->cond))
+        error_exit(__func__);
+}
+
+void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
+{
+    if (pthread_cond_wait(&cond->cond, &mutex->lock))
+        error_exit(__func__);
+}
+
+int qemu_cond_timedwait(QemuCond *cond, QemuMutex *mutex, uint64_t msecs)
+{
+    struct timespec ts;
+    int r;
+
+    clock_gettime(CLOCK_REALTIME, &ts);
+    timespec_add_ms(&ts, msecs);
+
+    r = pthread_cond_timedwait(&cond->cond, &mutex->lock, &ts);
+    if (r && r != ETIMEDOUT)
+        error_exit(__func__);
+    return r;
+}
+
+void qemu_thread_create(QemuThread *thread,
+                       void *(*start_routine)(void*),
+                       void *arg)
+{
+    if (pthread_create(&thread->thread, NULL, start_routine, arg))
+        error_exit(__func__);
+}
+
+void qemu_thread_signal(QemuThread *thread, int sig)
+{
+    if (pthread_kill(thread->thread, sig))
+        error_exit(__func__);
+}
+
+void qemu_thread_self(QemuThread *thread)
+{
+    thread->thread = pthread_self();
+}
+
+int qemu_thread_equal(QemuThread *thread1, QemuThread *thread2)
+{
+   return (thread1->thread == thread2->thread);
+}
+
Index: trunk/qemu-thread.h
===================================================================
--- /dev/null
+++ trunk/qemu-thread.h
@@ -0,0 +1,40 @@
+#ifndef __QEMU_THREAD_H
+#define __QEMU_THREAD_H 1
+#include "semaphore.h"
+#include "pthread.h"
+
+struct QemuMutex {
+    pthread_mutex_t lock;
+};
+
+struct QemuCond {
+    pthread_cond_t cond;
+};
+
+struct QemuThread {
+    pthread_t thread;
+};
+
+typedef struct QemuMutex QemuMutex;
+typedef struct QemuCond QemuCond;
+typedef struct QemuThread QemuThread;
+
+void qemu_mutex_init(QemuMutex *mutex);
+void qemu_mutex_lock(QemuMutex *mutex);
+int qemu_mutex_trylock(QemuMutex *mutex);
+int qemu_mutex_timedlock(QemuMutex *mutex, uint64_t msecs);
+void qemu_mutex_unlock(QemuMutex *mutex);
+
+void qemu_cond_init(QemuCond *cond);
+void qemu_cond_signal(QemuCond *cond);
+void qemu_cond_broadcast(QemuCond *cond);
+void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex);
+int qemu_cond_timedwait(QemuCond *cond, QemuMutex *mutex, uint64_t msecs);
+
+void qemu_thread_create(QemuThread *thread,
+                       void *(*start_routine)(void*),
+                       void *arg);
+void qemu_thread_signal(QemuThread *thread, int sig);
+void qemu_thread_self(QemuThread *thread);
+int qemu_thread_equal(QemuThread *thread1, QemuThread *thread2);
+#endif
Index: trunk/Makefile.target
===================================================================
--- trunk.orig/Makefile.target
+++ trunk/Makefile.target
@@ -501,6 +501,7 @@ endif #CONFIG_BSD_USER
 ifndef CONFIG_USER_ONLY
 
 OBJS=vl.o osdep.o monitor.o pci.o loader.o isa_mmio.o machine.o dma-helpers.o
+OBJS+=qemu-thread.o
 # virtio has to be here due to weird dependency between PCI and virtio-net.
 # need to fix this properly
 OBJS+=virtio.o virtio-blk.o virtio-balloon.o virtio-net.o virtio-console.o
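
For reference, a minimal usage sketch of these wrappers (not part of the
patch; the worker function and the ready flag are made up for illustration):

#include "qemu-thread.h"

static QemuMutex lock;
static QemuCond cond;
static int ready;

static void *worker(void *arg)
{
    qemu_mutex_lock(&lock);
    ready = 1;
    qemu_cond_signal(&cond);          /* wake the creator */
    qemu_mutex_unlock(&lock);
    return NULL;
}

static void start_worker(void)
{
    QemuThread thread;

    qemu_mutex_init(&lock);
    qemu_cond_init(&cond);
    qemu_thread_create(&thread, worker, NULL);

    qemu_mutex_lock(&lock);
    while (!ready)                    /* guard against spurious wakeups */
        qemu_cond_wait(&cond, &lock);
    qemu_mutex_unlock(&lock);
}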


* [Qemu-devel] [patch 03/10] qemu: per-arch cpu_has_work
  2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
  2009-03-25 22:47 ` [Qemu-devel] [patch 01/10] qemu: create helper for event notification Marcelo Tosatti
  2009-03-25 22:47 ` [Qemu-devel] [patch 02/10] qemu: mutex/thread/cond wrappers Marcelo Tosatti
@ 2009-03-25 22:47 ` Marcelo Tosatti
  2009-03-25 23:26   ` Glauber Costa
  2009-03-28 18:14   ` Blue Swirl
  2009-03-25 22:47 ` [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break Marcelo Tosatti
                   ` (7 subsequent siblings)
  10 siblings, 2 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 22:47 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

[-- Attachment #1: qemu-arch-has-work --]
[-- Type: text/plain, Size: 6879 bytes --]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Index: trunk/target-alpha/exec.h
===================================================================
--- trunk.orig/target-alpha/exec.h
+++ trunk/target-alpha/exec.h
@@ -48,10 +48,15 @@ static always_inline void regs_to_env(vo
 {
 }
 
+static always_inline int cpu_has_work(CPUState *env)
+{
+    return (env->interrupt_request & CPU_INTERRUPT_HARD);
+}
+
 static always_inline int cpu_halted(CPUState *env) {
     if (!env->halted)
         return 0;
-    if (env->interrupt_request & CPU_INTERRUPT_HARD) {
+    if (cpu_has_work(env)) {
         env->halted = 0;
         return 0;
     }
Index: trunk/target-i386/exec.h
===================================================================
--- trunk.orig/target-i386/exec.h
+++ trunk/target-i386/exec.h
@@ -338,14 +338,23 @@ static inline void regs_to_env(void)
 #endif
 }
 
+static inline int cpu_has_work(CPUState *env)
+{
+    int work;
+
+    work = (env->interrupt_request & CPU_INTERRUPT_HARD) &&
+           (env->eflags & IF_MASK);
+    work |= env->interrupt_request & CPU_INTERRUPT_NMI;
+
+    return work;
+}
+
 static inline int cpu_halted(CPUState *env) {
     /* handle exit of HALTED state */
     if (!env->halted)
         return 0;
     /* disable halt condition */
-    if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
-         (env->eflags & IF_MASK)) ||
-        (env->interrupt_request & CPU_INTERRUPT_NMI)) {
+    if (cpu_has_work(env)) {
         env->halted = 0;
         return 0;
     }
Index: trunk/cpu-all.h
===================================================================
--- trunk.orig/cpu-all.h
+++ trunk/cpu-all.h
@@ -775,6 +775,8 @@ void cpu_reset_interrupt(CPUState *env, 
 
 void cpu_exit(CPUState *s);
 
+int qemu_cpu_has_work(CPUState *env);
+
 /* Breakpoint/watchpoint flags */
 #define BP_MEM_READ           0x01
 #define BP_MEM_WRITE          0x02
Index: trunk/cpu-exec.c
===================================================================
--- trunk.orig/cpu-exec.c
+++ trunk/cpu-exec.c
@@ -50,6 +50,11 @@ int tb_invalidated_flag;
 //#define DEBUG_EXEC
 //#define DEBUG_SIGNAL
 
+int qemu_cpu_has_work(CPUState *env)
+{
+    return cpu_has_work(env);
+}
+
 void cpu_loop_exit(void)
 {
     /* NOTE: the register at this point must be saved by hand because
Index: trunk/target-arm/exec.h
===================================================================
--- trunk.orig/target-arm/exec.h
+++ trunk/target-arm/exec.h
@@ -37,14 +37,19 @@ static inline void regs_to_env(void)
 {
 }
 
+static inline int cpu_has_work(CPUState *env)
+{
+    return (env->interrupt_request &
+            (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB));
+}
+
 static inline int cpu_halted(CPUState *env) {
     if (!env->halted)
         return 0;
     /* An interrupt wakes the CPU even if the I and F CPSR bits are
        set.  We use EXITTB to silently wake CPU without causing an
        actual interrupt.  */
-    if (env->interrupt_request &
-        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB)) {
+    if (cpu_has_work(env)) {
         env->halted = 0;
         return 0;
     }
Index: trunk/target-cris/exec.h
===================================================================
--- trunk.orig/target-cris/exec.h
+++ trunk/target-cris/exec.h
@@ -40,6 +40,11 @@ static inline void regs_to_env(void)
 void cpu_cris_flush_flags(CPUCRISState *env, int cc_op);
 void helper_movec(CPUCRISState *env, int reg, uint32_t val);
 
+static inline int cpu_has_work(CPUState *env)
+{
+    return (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI));
+}
+
 static inline int cpu_halted(CPUState *env) {
 	if (!env->halted)
 		return 0;
Index: trunk/target-m68k/exec.h
===================================================================
--- trunk.orig/target-m68k/exec.h
+++ trunk/target-m68k/exec.h
@@ -41,10 +41,15 @@ static inline void regs_to_env(void)
 #include "softmmu_exec.h"
 #endif
 
+static inline int cpu_has_work(CPUState *env)
+{
+    return (env->interrupt_request & (CPU_INTERRUPT_HARD));
+}
+
 static inline int cpu_halted(CPUState *env) {
     if (!env->halted)
         return 0;
-    if (env->interrupt_request & CPU_INTERRUPT_HARD) {
+    if (cpu_has_work(env)) {
         env->halted = 0;
         return 0;
     }
Index: trunk/target-mips/exec.h
===================================================================
--- trunk.orig/target-mips/exec.h
+++ trunk/target-mips/exec.h
@@ -33,12 +33,18 @@ static inline void regs_to_env(void)
 {
 }
 
+static inline int cpu_has_work(CPUState *env)
+{
+    return (env->interrupt_request &
+            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER));
+}
+
+
 static inline int cpu_halted(CPUState *env)
 {
     if (!env->halted)
         return 0;
-    if (env->interrupt_request &
-        (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
+    if (cpu_has_work(env)) {
         env->halted = 0;
         return 0;
     }
Index: trunk/target-ppc/exec.h
===================================================================
--- trunk.orig/target-ppc/exec.h
+++ trunk/target-ppc/exec.h
@@ -44,11 +44,17 @@ static always_inline void regs_to_env (v
 {
 }
 
+static always_inline int cpu_has_work(CPUState *env)
+{
+    return (msr_ee && (env->interrupt_request & CPU_INTERRUPT_HARD));
+}
+
+
 static always_inline int cpu_halted (CPUState *env)
 {
     if (!env->halted)
         return 0;
-    if (msr_ee && (env->interrupt_request & CPU_INTERRUPT_HARD)) {
+    if (cpu_has_work(env)) {
         env->halted = 0;
         return 0;
     }
Index: trunk/target-sh4/exec.h
===================================================================
--- trunk.orig/target-sh4/exec.h
+++ trunk/target-sh4/exec.h
@@ -28,10 +28,15 @@ register struct CPUSH4State *env asm(ARE
 #include "cpu.h"
 #include "exec-all.h"
 
+static inline int cpu_has_work(CPUState *env)
+{
+    return (env->interrupt_request & CPU_INTERRUPT_HARD);
+}
+
 static inline int cpu_halted(CPUState *env) {
     if (!env->halted)
         return 0;
-    if (env->interrupt_request & CPU_INTERRUPT_HARD) {
+    if (cpu_has_work(env)) {
         env->halted = 0;
         env->intr_at_halt = 1;
         return 0;
Index: trunk/target-sparc/exec.h
===================================================================
--- trunk.orig/target-sparc/exec.h
+++ trunk/target-sparc/exec.h
@@ -24,10 +24,17 @@ static inline void regs_to_env(void)
 /* op_helper.c */
 void do_interrupt(CPUState *env);
 
+static inline int cpu_has_work(CPUState *env)
+{
+    return (env->interrupt_request & CPU_INTERRUPT_HARD) &&
+           (env->psret != 0);
+}
+
+
 static inline int cpu_halted(CPUState *env1) {
     if (!env1->halted)
         return 0;
-    if ((env1->interrupt_request & CPU_INTERRUPT_HARD) && (env1->psret != 0)) {
+    if (cpu_has_work(env1)) {
         env1->halted = 0;
         return 0;
     }
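
The point of the exported qemu_cpu_has_work() wrapper is that code outside
the per-target exec.h headers (such as the iothread added later in this
series) can test the same wake-up condition. Roughly, a generic caller looks
like this (a sketch mirroring the tcg_has_work() helper in patch 05):

/* Sketch: decide whether any CPU would leave the halted state. */
static int any_cpu_has_work(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!env->halted || qemu_cpu_has_work(env))
            return 1;
    }
    return 0;
}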


* [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break
  2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
                   ` (2 preceding siblings ...)
  2009-03-25 22:47 ` [Qemu-devel] [patch 03/10] qemu: per-arch cpu_has_work Marcelo Tosatti
@ 2009-03-25 22:47 ` Marcelo Tosatti
  2009-03-26  1:24   ` Glauber Costa
  2009-03-26 12:27   ` Jamie Lokier
  2009-03-25 22:47 ` [Qemu-devel] [patch 05/10] qemu: separate thread for io Marcelo Tosatti
                   ` (6 subsequent siblings)
  10 siblings, 2 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 22:47 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

[-- Attachment #1: pipefd --]
[-- Type: text/plain, Size: 2387 bytes --]

Use a pipe to signal pending work for the iothread.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
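
The mechanism is the classic self-pipe trick: the read end is registered as
an fd handler, so writing to the write end forces select() in the main loop
to return. Stripped of the EINTR/partial-I/O handling that the patch does
properly, the idea is:

#include <stdint.h>
#include <unistd.h>

/* set up with pipe() and qemu_set_fd_handler2() at init time */
static int wakeup_fds[2];

/* writer side: any context that wants the main loop to wake up */
static void wakeup_main_loop(void)
{
    uint64_t value = 1;
    write(wakeup_fds[1], &value, sizeof(value));
}

/* reader side: drains the pipe so select() can block again */
static void wakeup_handler(void *opaque)
{
    uint64_t value;
    read(wakeup_fds[0], &value, sizeof(value));
}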

Index: trunk/qemu-common.h
===================================================================
--- trunk.orig/qemu-common.h
+++ trunk/qemu-common.h
@@ -186,6 +186,8 @@ int cpu_load(QEMUFile *f, void *opaque, 
 /* Force QEMU to stop what it's doing and service IO */
 void qemu_service_io(void);
 
+void main_loop_break(void);
+
 /* Force QEMU to process pending events */
 void qemu_notify_event(void);
 
Index: trunk/vl.c
===================================================================
--- trunk.orig/vl.c
+++ trunk/vl.c
@@ -275,6 +275,8 @@ static QEMUTimer *nographic_timer;
 
 uint8_t qemu_uuid[16];
 
+static int io_thread_fd = -1;
+
 /***********************************************************/
 /* x86 ISA bus support */
 
@@ -3631,6 +3633,55 @@ void qemu_notify_event(void)
      }
 }
 
+void main_loop_break(void)
+{
+    uint64_t value = 1;
+    char buffer[8];
+    size_t offset = 0;
+
+    if (io_thread_fd == -1)
+        return;
+
+    memcpy(buffer, &value, sizeof(value));
+
+    while (offset < 8) {
+        ssize_t len;
+
+        len = write(io_thread_fd, buffer + offset, 8 - offset);
+        if (len == -1 && errno == EINTR)
+            continue;
+
+        if (len <= 0)
+            break;
+
+        offset += len;
+    }
+
+    if (offset != 8)
+        fprintf(stderr, "failed to notify io thread\n");
+}
+
+/* Used to break IO thread out of select */
+static void io_thread_wakeup(void *opaque)
+{
+    int fd = (unsigned long)opaque;
+    char buffer[8];
+    size_t offset = 0;
+
+    while (offset < 8) {
+        ssize_t len;
+
+        len = read(fd, buffer + offset, 8 - offset);
+        if (len == -1 && errno == EINTR)
+            continue;
+
+        if (len <= 0)
+            break;
+
+        offset += len;
+    }
+}
+
 #ifdef _WIN32
 static void host_main_loop_wait(int *timeout)
 {
@@ -3773,6 +3824,20 @@ void main_loop_wait(int timeout)
 
 }
 
+static void setup_iothread_fd(void)
+{
+    int fds[2];
+
+    if (pipe(fds) == -1) {
+        fprintf(stderr, "failed to create iothread pipe");
+        exit(0);
+    }
+
+    qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
+                         (void *)(unsigned long)fds[0]);
+    io_thread_fd = fds[1];
+}
+
 static int main_loop(void)
 {
     int ret, timeout;


* [Qemu-devel] [patch 05/10] qemu: separate thread for io
  2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
                   ` (3 preceding siblings ...)
  2009-03-25 22:47 ` [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break Marcelo Tosatti
@ 2009-03-25 22:47 ` Marcelo Tosatti
  2009-03-25 22:47 ` [Qemu-devel] [patch 06/10] qemu: per-cpu thread information Marcelo Tosatti
                   ` (5 subsequent siblings)
  10 siblings, 0 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 22:47 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

[-- Attachment #1: introduce-io-thread --]
[-- Type: text/plain, Size: 8421 bytes --]

Introduce a thread to handle host IO events.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
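
The resulting structure is two threads sharing one big lock: the io thread
runs main_loop_wait()/select() and the cpu thread runs TCG, each dropping
qemu_global_mutex only while it blocks. A toy model of that discipline,
built only on the patch 02 wrappers (not the actual code, which is in the
diff below):

#include <unistd.h>
#include "qemu-thread.h"

static QemuMutex global_mutex;
static QemuCond halt_cond;

static void *toy_cpu_loop(void *arg)
{
    qemu_mutex_lock(&global_mutex);
    for (;;) {
        /* run guest code here; when idle, sleep on the condition
         * variable, which releases global_mutex while waiting */
        qemu_cond_timedwait(&halt_cond, &global_mutex, 1000);
    }
    return NULL;
}

static void toy_io_loop(void)
{
    qemu_mutex_lock(&global_mutex);
    for (;;) {
        qemu_mutex_unlock(&global_mutex);
        usleep(1000);                      /* stands in for select() */
        qemu_mutex_lock(&global_mutex);    /* real code uses qemu_signal_lock() */
        /* dispatch fd handlers and timers here ... */
        qemu_cond_broadcast(&halt_cond);   /* kick the cpu thread */
    }
}

int main(void)
{
    QemuThread cpu_thread;

    qemu_mutex_init(&global_mutex);
    qemu_cond_init(&halt_cond);
    qemu_thread_create(&cpu_thread, toy_cpu_loop, NULL);
    toy_io_loop();
    return 0;
}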


Index: trunk/qemu-common.h
===================================================================
--- trunk.orig/qemu-common.h
+++ trunk/qemu-common.h
@@ -191,6 +191,10 @@ void main_loop_break(void);
 /* Force QEMU to process pending events */
 void qemu_notify_event(void);
 
+/* Unblock cpu */
+void qemu_cpu_kick(void *env);
+int qemu_cpu_self(void *env);
+
 typedef struct QEMUIOVector {
     struct iovec *iov;
     int niov;
Index: trunk/vl.c
===================================================================
--- trunk.orig/vl.c
+++ trunk/vl.c
@@ -146,6 +146,7 @@ int main(int argc, char **argv)
 #include "gdbstub.h"
 #include "qemu-timer.h"
 #include "qemu-char.h"
+#include "qemu-thread.h"
 #include "cache-utils.h"
 #include "block.h"
 #include "audio/audio.h"
@@ -277,6 +278,13 @@ uint8_t qemu_uuid[16];
 
 static int io_thread_fd = -1;
 
+QemuMutex qemu_global_mutex;
+QemuMutex qemu_fair_mutex;
+
+QemuThread io_thread;
+QemuThread cpus_thread;
+QemuCond halt_cond;
+
 /***********************************************************/
 /* x86 ISA bus support */
 
@@ -1346,8 +1354,6 @@ static void host_alarm_handler(int host_
         write(alarm_timer_wfd, &byte, sizeof(byte));
 #endif
         alarm_timer->flags |= ALARM_FLAG_EXPIRED;
-
-        qemu_notify_event();
     }
 }
 
@@ -2956,6 +2962,7 @@ int qemu_set_fd_handler2(int fd,
         ioh->opaque = opaque;
         ioh->deleted = 0;
     }
+    main_loop_break();
     return 0;
 }
 
@@ -3323,7 +3330,6 @@ static int ram_load(QEMUFile *f, void *o
 
 void qemu_service_io(void)
 {
-    qemu_notify_event();
 }
 
 /***********************************************************/
@@ -3396,7 +3402,7 @@ void qemu_bh_schedule(QEMUBH *bh)
     bh->scheduled = 1;
     bh->idle = 0;
     /* stop the currently executing CPU to execute the BH ASAP */
-    qemu_notify_event();
+    main_loop_break();
 }
 
 void qemu_bh_cancel(QEMUBH *bh)
@@ -3605,32 +3611,24 @@ void qemu_system_reset_request(void)
     } else {
         reset_requested = 1;
     }
-    qemu_notify_event();
+    main_loop_break();
 }
 
 void qemu_system_shutdown_request(void)
 {
     shutdown_requested = 1;
-    qemu_notify_event();
+    main_loop_break();
 }
 
 void qemu_system_powerdown_request(void)
 {
     powerdown_requested = 1;
-    qemu_notify_event();
+    main_loop_break();
 }
 
 void qemu_notify_event(void)
 {
-    CPUState *env = cpu_single_env;
-
-    if (env) {
-        cpu_exit(env);
-#ifdef USE_KQEMU
-        if (env->kqemu_enabled)
-            kqemu_cpu_interrupt(env);
-#endif
-     }
+    main_loop_break();
 }
 
 void main_loop_break(void)
@@ -3732,6 +3730,105 @@ static void host_main_loop_wait(int *tim
 }
 #endif
 
+static int cpu_has_work(CPUState *env)
+{
+    if (!env->halted)
+        return 1;
+    if (qemu_cpu_has_work(env))
+        return 1;
+    return 0;
+}
+
+static int tcg_has_work(CPUState *env)
+{
+    for (env = first_cpu; env != NULL; env = env->next_cpu)
+        if (cpu_has_work(env))
+            return 1;
+    return 0;
+}
+
+static void qemu_wait_io_event(CPUState *env, int timeout)
+{
+    if (timeout)
+        while (!tcg_has_work(env))
+            qemu_cond_timedwait(&halt_cond, &qemu_global_mutex, timeout);
+
+   qemu_mutex_unlock(&qemu_global_mutex);
+
+    /*
+     * Users of qemu_global_mutex can be starved, having no chance
+     * to acquire it since this path will get to it first.
+     * So use another lock to provide fairness.
+     */
+    qemu_mutex_lock(&qemu_fair_mutex);
+    qemu_mutex_unlock(&qemu_fair_mutex);
+
+    qemu_mutex_lock(&qemu_global_mutex);
+}
+
+void qemu_cpu_kick(void *env)
+{
+    qemu_cond_broadcast(&halt_cond);
+}
+
+int qemu_cpu_self(void *env)
+{
+    return (cpu_single_env != NULL);
+}
+
+static void cpu_signal(int sig)
+{
+    if (cpu_single_env)
+        cpu_exit(cpu_single_env);
+}
+
+static void block_io_signals(void)
+{
+    sigset_t set;
+    struct sigaction sigact;
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR2);
+    sigaddset(&set, SIGIO);
+    sigaddset(&set, SIGALRM);
+    pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR1);
+    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+
+    memset(&sigact, 0, sizeof(sigact));
+    sigact.sa_handler = cpu_signal;
+    sigaction(SIGUSR1, &sigact, NULL);
+}
+
+static void unblock_io_signals(void)
+{
+    sigset_t set;
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR2);
+    sigaddset(&set, SIGIO);
+    sigaddset(&set, SIGALRM);
+    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR1);
+    pthread_sigmask(SIG_BLOCK, &set, NULL);
+}
+
+static void qemu_signal_lock(unsigned int msecs)
+{
+    qemu_mutex_lock(&qemu_fair_mutex);
+
+    while (qemu_mutex_trylock(&qemu_global_mutex)) {
+        qemu_thread_signal(&cpus_thread, SIGUSR1);
+        if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
+            break;
+    }
+    qemu_mutex_unlock(&qemu_fair_mutex);
+}
+
 void main_loop_wait(int timeout)
 {
     IOHandlerRecord *ioh;
@@ -3774,7 +3871,14 @@ void main_loop_wait(int timeout)
         slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
     }
 #endif
+
+    /*
+     * main_loop_wait() *must* not assume any global state is consistent across
+     * select() invocations.
+     */
+    qemu_mutex_unlock(&qemu_global_mutex);
     ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
+    qemu_signal_lock(100);
     if (ret > 0) {
         IOHandlerRecord **pioh;
 
@@ -3810,9 +3914,11 @@ void main_loop_wait(int timeout)
 #endif
 
     /* vm time timers */
-    if (vm_running && likely(!(cur_cpu->singlestep_enabled & SSTEP_NOTIMER)))
-        qemu_run_timers(&active_timers[QEMU_TIMER_VIRTUAL],
-                        qemu_get_clock(vm_clock));
+    if (vm_running) {
+        if (cur_cpu && likely(!(cur_cpu->singlestep_enabled & SSTEP_NOTIMER)))
+            qemu_run_timers(&active_timers[QEMU_TIMER_VIRTUAL],
+                             qemu_get_clock(vm_clock));
+    }
 
     /* real time timers */
     qemu_run_timers(&active_timers[QEMU_TIMER_REALTIME],
@@ -3836,9 +3942,10 @@ static void setup_iothread_fd(void)
     qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
                          (void *)(unsigned long)fds[0]);
     io_thread_fd = fds[1];
+    fcntl(io_thread_fd, F_SETFL, O_NONBLOCK);
 }
 
-static int main_loop(void)
+static void *cpu_main_loop(void *arg)
 {
     int ret, timeout;
 #ifdef CONFIG_PROFILER
@@ -3846,7 +3953,12 @@ static int main_loop(void)
 #endif
     CPUState *env;
 
-    cur_cpu = first_cpu;
+    block_io_signals();
+    qemu_thread_self(&cpus_thread);
+
+    qemu_mutex_lock(&qemu_global_mutex);
+
+    cur_cpu = env = first_cpu;
     next_cpu = cur_cpu->next_cpu ?: first_cpu;
     for(;;) {
         if (vm_running) {
@@ -3969,6 +4081,7 @@ static int main_loop(void)
                 timeout = 0;
             }
         } else {
+            env = env->next_cpu ?: first_cpu;
             if (shutdown_requested) {
                 ret = EXCP_INTERRUPT;
                 break;
@@ -3978,13 +4091,31 @@ static int main_loop(void)
 #ifdef CONFIG_PROFILER
         ti = profile_getclock();
 #endif
-        main_loop_wait(timeout);
+        qemu_wait_io_event(env, timeout);
 #ifdef CONFIG_PROFILER
         dev_time += profile_getclock() - ti;
 #endif
     }
     cpu_disable_ticks();
-    return ret;
+    return NULL;
+}
+
+static void main_loop(void)
+{
+    qemu_cond_init(&halt_cond);
+    qemu_mutex_init(&qemu_fair_mutex);
+    qemu_mutex_init(&qemu_global_mutex);
+    qemu_mutex_lock(&qemu_global_mutex);
+
+    qemu_thread_self(&io_thread);
+    setup_iothread_fd();
+
+    unblock_io_signals();
+
+    qemu_thread_create(&cpus_thread, cpu_main_loop, NULL);
+
+    while (1)
+        main_loop_wait(1000);
 }
 
 static void help(int exitcode)
Index: trunk/exec.c
===================================================================
--- trunk.orig/exec.c
+++ trunk/exec.c
@@ -1532,6 +1532,13 @@ void cpu_interrupt(CPUState *env, int ma
     old_mask = env->interrupt_request;
     env->interrupt_request |= mask;
 
+#ifndef CONFIG_USER_ONLY
+    if (!qemu_cpu_self(env)) {
+        qemu_cpu_kick(env);
+        return;
+    }
+#endif
+
     if (use_icount) {
         env->icount_decr.u16.high = 0xffff;
 #ifndef CONFIG_USER_ONLY


* [Qemu-devel] [patch 06/10] qemu: per-cpu thread information
  2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
                   ` (4 preceding siblings ...)
  2009-03-25 22:47 ` [Qemu-devel] [patch 05/10] qemu: separate thread for io Marcelo Tosatti
@ 2009-03-25 22:47 ` Marcelo Tosatti
  2009-03-26  1:35   ` Glauber Costa
  2009-03-25 22:47 ` [Qemu-devel] [patch 07/10] qemu: handle reset/poweroff/shutdown in iothread Marcelo Tosatti
                   ` (4 subsequent siblings)
  10 siblings, 1 reply; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 22:47 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

[-- Attachment #1: percpu-state --]
[-- Type: text/plain, Size: 8646 bytes --]

Move per-cpu thread information to CPUState. Initialize through per-arch
cpu_init.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
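
After this patch a target's cpu_xxx_init() is expected to finish with
qemu_init_vcpu(env), which fills in the new CPUState fields (thread,
halt_cond, created). For a hypothetical target the pattern is simply:

/* CPUFooState and cpu_foo_init() are hypothetical; see the real targets
 * touched in the diff below for the actual call sites. */
CPUFooState *cpu_foo_init(const char *cpu_model)
{
    CPUFooState *env = qemu_mallocz(sizeof(*env));

    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;
    cpu_reset(env);
    qemu_init_vcpu(env);    /* create or attach the cpu thread + halt cond */
    return env;
}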

Index: trunk/cpu-defs.h
===================================================================
--- trunk.orig/cpu-defs.h
+++ trunk/cpu-defs.h
@@ -158,6 +158,8 @@ typedef struct CPUWatchpoint {
     TAILQ_ENTRY(CPUWatchpoint) entry;
 } CPUWatchpoint;
 
+#include "qemu-thread.h"
+
 #define CPU_TEMP_BUF_NLONGS 128
 #define CPU_COMMON                                                      \
     struct TranslationBlock *current_tb; /* currently executing TB  */  \
@@ -209,6 +211,9 @@ typedef struct CPUWatchpoint {
     /* user data */                                                     \
     void *opaque;                                                       \
                                                                         \
+    uint32_t created;                                                   \
+    struct QemuThread *thread;                                          \
+    struct QemuCond *halt_cond;                                         \
     const char *cpu_model_str;                                          \
     struct KVMState *kvm_state;                                         \
     struct kvm_run *kvm_run;                                            \
Index: trunk/target-i386/helper.c
===================================================================
--- trunk.orig/target-i386/helper.c
+++ trunk/target-i386/helper.c
@@ -1670,5 +1670,8 @@ CPUX86State *cpu_x86_init(const char *cp
 #endif
     if (kvm_enabled())
         kvm_init_vcpu(env);
+
+    qemu_init_vcpu(env);
+
     return env;
 }
Index: trunk/vl.c
===================================================================
--- trunk.orig/vl.c
+++ trunk/vl.c
@@ -282,8 +282,15 @@ QemuMutex qemu_global_mutex;
 QemuMutex qemu_fair_mutex;
 
 QemuThread io_thread;
-QemuThread cpus_thread;
-QemuCond halt_cond;
+
+QemuThread *tcg_cpu_thread;
+QemuCond *tcg_halt_cond;
+
+static int qemu_system_ready;
+/* cpu creation */
+QemuCond qemu_cpu_cond;
+/* system init */
+QemuCond qemu_system_cond;
 
 /***********************************************************/
 /* x86 ISA bus support */
@@ -3751,7 +3758,7 @@ static void qemu_wait_io_event(CPUState 
 {
     if (timeout)
         while (!tcg_has_work(env))
-            qemu_cond_timedwait(&halt_cond, &qemu_global_mutex, timeout);
+            qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, timeout);
 
    qemu_mutex_unlock(&qemu_global_mutex);
 
@@ -3766,9 +3773,10 @@ static void qemu_wait_io_event(CPUState 
     qemu_mutex_lock(&qemu_global_mutex);
 }
 
-void qemu_cpu_kick(void *env)
+void qemu_cpu_kick(void *_env)
 {
-    qemu_cond_broadcast(&halt_cond);
+    CPUState *env = _env;
+    qemu_cond_broadcast(env->halt_cond);
 }
 
 int qemu_cpu_self(void *env)
@@ -3822,7 +3830,7 @@ static void qemu_signal_lock(unsigned in
     qemu_mutex_lock(&qemu_fair_mutex);
 
     while (qemu_mutex_trylock(&qemu_global_mutex)) {
-        qemu_thread_signal(&cpus_thread, SIGUSR1);
+        qemu_thread_signal(tcg_cpu_thread, SIGUSR1);
         if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
             break;
     }
@@ -3951,12 +3959,19 @@ static void *cpu_main_loop(void *arg)
 #ifdef CONFIG_PROFILER
     int64_t ti;
 #endif
-    CPUState *env;
+    CPUState *env = arg;
 
     block_io_signals();
-    qemu_thread_self(&cpus_thread);
+    qemu_thread_self(env->thread);
 
+    /* signal CPU creation */
     qemu_mutex_lock(&qemu_global_mutex);
+    env->created = 1;
+    qemu_cond_signal(&qemu_cpu_cond);
+
+    /* and wait for machine initialization */
+    while (!qemu_system_ready)
+        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
 
     cur_cpu = env = first_cpu;
     next_cpu = cur_cpu->next_cpu ?: first_cpu;
@@ -4100,19 +4115,41 @@ static void *cpu_main_loop(void *arg)
     return NULL;
 }
 
-static void main_loop(void)
+void qemu_init_vcpu(void *_env)
+{
+    CPUState *env = _env;
+    /* share a single thread for all cpus with TCG */
+    if (!tcg_cpu_thread) {
+        env->thread = qemu_mallocz(sizeof(QemuThread));
+        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
+        qemu_cond_init(env->halt_cond);
+        qemu_thread_create(env->thread, cpu_main_loop, env);
+        while (env->created == 0)
+            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
+        tcg_cpu_thread = env->thread;
+        tcg_halt_cond = env->halt_cond;
+    } else {
+        env->thread = tcg_cpu_thread;
+        env->halt_cond = tcg_halt_cond;
+    }
+}
+
+static void qemu_init_state(void)
 {
-    qemu_cond_init(&halt_cond);
     qemu_mutex_init(&qemu_fair_mutex);
     qemu_mutex_init(&qemu_global_mutex);
     qemu_mutex_lock(&qemu_global_mutex);
+}
 
+static void main_loop(void)
+{
     qemu_thread_self(&io_thread);
     setup_iothread_fd();
 
     unblock_io_signals();
 
-    qemu_thread_create(&cpus_thread, cpu_main_loop, NULL);
+    qemu_system_ready = 1;
+    qemu_cond_broadcast(&qemu_system_cond);
 
     while (1)
         main_loop_wait(1000);
@@ -5578,6 +5615,7 @@ int main(int argc, char **argv, char **e
     if (smp_cpus > 1)
         kqemu_allowed = 0;
 #endif
+    qemu_init_state();
     linux_boot = (kernel_filename != NULL);
     net_boot = (boot_devices_bitmap >> ('n' - 'a')) & 0xF;
 
Index: trunk/qemu-common.h
===================================================================
--- trunk.orig/qemu-common.h
+++ trunk/qemu-common.h
@@ -195,6 +195,12 @@ void qemu_notify_event(void);
 void qemu_cpu_kick(void *env);
 int qemu_cpu_self(void *env);
 
+#ifdef CONFIG_USER_ONLY
+#define qemu_init_vcpu(env) do { } while (0)
+#else
+void qemu_init_vcpu(void *env);
+#endif
+
 typedef struct QEMUIOVector {
     struct iovec *iov;
     int niov;
Index: trunk/target-arm/helper.c
===================================================================
--- trunk.orig/target-arm/helper.c
+++ trunk/target-arm/helper.c
@@ -267,6 +267,7 @@ CPUARMState *cpu_arm_init(const char *cp
         gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                  19, "arm-vfp.xml", 0);
     }
+    qemu_init_vcpu(env);
     return env;
 }
 
Index: trunk/target-m68k/helper.c
===================================================================
--- trunk.orig/target-m68k/helper.c
+++ trunk/target-m68k/helper.c
@@ -180,6 +180,7 @@ CPUM68KState *cpu_m68k_init(const char *
     }
 
     cpu_reset(env);
+    qemu_init_vcpu(env);
     return env;
 }
 
Index: trunk/target-ppc/helper.c
===================================================================
--- trunk.orig/target-ppc/helper.c
+++ trunk/target-ppc/helper.c
@@ -2831,6 +2831,7 @@ CPUPPCState *cpu_ppc_init (const char *c
 
     if (kvm_enabled())
         kvm_init_vcpu(env);
+    qemu_init_vcpu(env);
 
     return env;
 }
Index: trunk/target-sparc/helper.c
===================================================================
--- trunk.orig/target-sparc/helper.c
+++ trunk/target-sparc/helper.c
@@ -723,6 +723,7 @@ CPUSPARCState *cpu_sparc_init(const char
         return NULL;
     }
     cpu_reset(env);
+    qemu_init_vcpu(env);
 
     return env;
 }
Index: trunk/target-alpha/translate.c
===================================================================
--- trunk.orig/target-alpha/translate.c
+++ trunk/target-alpha/translate.c
@@ -2495,6 +2495,7 @@ CPUAlphaState * cpu_alpha_init (const ch
     env->ipr[IPR_SISR] = 0;
     env->ipr[IPR_VIRBND] = -1ULL;
 
+    qemu_init_vcpu(env);
     return env;
 }
 
Index: trunk/target-cris/translate.c
===================================================================
--- trunk.orig/target-cris/translate.c
+++ trunk/target-cris/translate.c
@@ -3404,6 +3404,7 @@ CPUCRISState *cpu_cris_init (const char 
 
 	cpu_exec_init(env);
 	cpu_reset(env);
+	qemu_init_vcpu(env);
 
 	if (tcg_initialized)
 		return env;
Index: trunk/target-mips/translate.c
===================================================================
--- trunk.orig/target-mips/translate.c
+++ trunk/target-mips/translate.c
@@ -8480,6 +8480,7 @@ CPUMIPSState *cpu_mips_init (const char 
     env->cpu_model_str = cpu_model;
     mips_tcg_init();
     cpu_reset(env);
+    qemu_init_vcpu(env);
     return env;
 }
 
Index: trunk/target-sh4/translate.c
===================================================================
--- trunk.orig/target-sh4/translate.c
+++ trunk/target-sh4/translate.c
@@ -288,6 +288,7 @@ CPUSH4State *cpu_sh4_init(const char *cp
     cpu_sh4_reset(env);
     cpu_sh4_register(env, def);
     tlb_flush(env, 1);
+    qemu_init_vcpu(env);
     return env;
 }
 


* [Qemu-devel] [patch 07/10] qemu: handle reset/poweroff/shutdown in iothread
  2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
                   ` (5 preceding siblings ...)
  2009-03-25 22:47 ` [Qemu-devel] [patch 06/10] qemu: per-cpu thread information Marcelo Tosatti
@ 2009-03-25 22:47 ` Marcelo Tosatti
  2009-03-25 22:47 ` [Qemu-devel] [patch 08/10] qemu: pause and resume cpu threads Marcelo Tosatti
                   ` (3 subsequent siblings)
  10 siblings, 0 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 22:47 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

[-- Attachment #1: move-machine-events-to-iothread-2 --]
[-- Type: text/plain, Size: 1938 bytes --]

It's simpler to handle these events from only one context.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
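
The pattern is request-and-consume: any context may set the request flag and
wake the main loop, but only the iothread tests and clears it. For the
shutdown case this boils down to roughly the following (the requester side
is already in the tree after patch 05; the accessor body here is an
assumption, written by analogy with qemu_vmstop_requested() in patch 09):

/* requester side: may run in cpu or io context */
void qemu_system_shutdown_request(void)
{
    shutdown_requested = 1;
    main_loop_break();                 /* wake the iothread's select() */
}

/* consumer side: called only from the iothread's main_loop() */
static int qemu_shutdown_requested(void)
{
    int r = shutdown_requested;
    shutdown_requested = 0;
    return r;
}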

Index: trunk/vl.c
===================================================================
--- trunk.orig/vl.c
+++ trunk/vl.c
@@ -4030,25 +4030,6 @@ static void *cpu_main_loop(void *arg)
             }
             cur_cpu = env;
 
-            if (shutdown_requested) {
-                ret = EXCP_INTERRUPT;
-                if (no_shutdown) {
-                    vm_stop(0);
-                    no_shutdown = 0;
-                }
-                else
-                    break;
-            }
-            if (reset_requested) {
-                reset_requested = 0;
-                qemu_system_reset();
-                ret = EXCP_INTERRUPT;
-            }
-            if (powerdown_requested) {
-                powerdown_requested = 0;
-		qemu_system_powerdown();
-                ret = EXCP_INTERRUPT;
-            }
             if (unlikely(ret == EXCP_DEBUG)) {
                 gdb_set_stop_cpu(cur_cpu);
                 vm_stop(EXCP_DEBUG);
@@ -4097,10 +4078,6 @@ static void *cpu_main_loop(void *arg)
             }
         } else {
             env = env->next_cpu ?: first_cpu;
-            if (shutdown_requested) {
-                ret = EXCP_INTERRUPT;
-                break;
-            }
             timeout = 5000;
         }
 #ifdef CONFIG_PROFILER
@@ -4151,8 +4128,18 @@ static void main_loop(void)
     qemu_system_ready = 1;
     qemu_cond_broadcast(&qemu_system_cond);
 
-    while (1)
+    while (1) {
         main_loop_wait(1000);
+        if (qemu_shutdown_requested()) {
+            if (no_shutdown)
+                no_shutdown = 0;
+            else
+                    break;
+        } else if (qemu_powerdown_requested())
+            qemu_system_powerdown();
+        else if (qemu_reset_requested())
+            qemu_system_reset();
+    }
 }
 
 static void help(int exitcode)


* [Qemu-devel] [patch 08/10] qemu: pause and resume cpu threads
  2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
                   ` (6 preceding siblings ...)
  2009-03-25 22:47 ` [Qemu-devel] [patch 07/10] qemu: handle reset/poweroff/shutdown in iothread Marcelo Tosatti
@ 2009-03-25 22:47 ` Marcelo Tosatti
  2009-03-25 22:47 ` [Qemu-devel] [patch 09/10] qemu: handle vmstop from cpu context Marcelo Tosatti
                   ` (2 subsequent siblings)
  10 siblings, 0 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 22:47 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

[-- Attachment #1: pause-stop-threads-2 --]
[-- Type: text/plain, Size: 5278 bytes --]

Since cpu emulation happens on a separate thread, it is necessary to
pause/resume it upon certain events such as reset, debug exception,
live migration, etc.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
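
The handshake is flag based: the iothread sets env->stop and kicks the vcpu;
the vcpu notices the flag at a safe point (qemu_wait_io_event), turns it into
env->stopped and signals qemu_pause_cond. A per-cpu simplification of what
pause_all_vcpus() in the diff does for every cpu (the helper names here are
made up):

/* iothread side (simplified, single cpu) */
static void toy_pause_vcpu(CPUState *env)
{
    env->stop = 1;
    qemu_thread_signal(env->thread, SIGUSR1);   /* break out of cpu_exec() */
    qemu_cpu_kick(env);                         /* break out of the halt wait */
    while (!env->stopped)
        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
}

/* vcpu side, called from qemu_wait_io_event() with the global lock held */
static void toy_check_pause(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
}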

Index: trunk/cpu-defs.h
===================================================================
--- trunk.orig/cpu-defs.h
+++ trunk/cpu-defs.h
@@ -172,6 +172,8 @@ typedef struct CPUWatchpoint {
     target_ulong mem_io_vaddr; /* target virtual addr at which the      \
                                      memory was accessed */             \
     uint32_t halted; /* Nonzero if the CPU is in suspend state */       \
+    uint32_t stop;   /* Stop request */                                 \
+    uint32_t stopped; /* Artificially stopped */                        \
     uint32_t interrupt_request;                                         \
     volatile sig_atomic_t exit_request;                                 \
     /* The meaning of the MMU modes is defined in the target code. */   \
Index: trunk/vl.c
===================================================================
--- trunk.orig/vl.c
+++ trunk/vl.c
@@ -292,6 +292,11 @@ QemuCond qemu_cpu_cond;
 /* system init */
 QemuCond qemu_system_cond;
 
+QemuCond qemu_pause_cond;
+
+static void pause_all_vcpus(void);
+static void resume_all_vcpus(void);
+
 /***********************************************************/
 /* x86 ISA bus support */
 
@@ -3539,6 +3544,7 @@ void vm_start(void)
     if (!vm_running) {
         cpu_enable_ticks();
         vm_running = 1;
+        resume_all_vcpus();
         vm_state_notify(1, 0);
         qemu_rearm_alarm_timer(alarm_timer);
     }
@@ -3549,6 +3555,7 @@ void vm_stop(int reason)
     if (vm_running) {
         cpu_disable_ticks();
         vm_running = 0;
+        pause_all_vcpus();
         vm_state_notify(0, reason);
     }
 }
@@ -3737,8 +3744,29 @@ static void host_main_loop_wait(int *tim
 }
 #endif
 
+/* Q: is it allowed to enter cpu_exec */
+static int cpu_can_run(CPUState *env)
+{
+    if (env->stop)
+        return 0;
+    if (env->stopped)
+        return 0;
+    if (shutdown_requested)
+        return 0;
+    if (powerdown_requested)
+        return 0;
+    if (reset_requested)
+        return 0;
+    return 1;
+}
+
+/* Q: should break out of the wait loop */
 static int cpu_has_work(CPUState *env)
 {
+    if (env->stop)
+        return 1;
+    if (env->stopped)
+        return 0;
     if (!env->halted)
         return 1;
     if (qemu_cpu_has_work(env))
@@ -3771,6 +3799,11 @@ static void qemu_wait_io_event(CPUState 
     qemu_mutex_unlock(&qemu_fair_mutex);
 
     qemu_mutex_lock(&qemu_global_mutex);
+    if (env->stop) {
+        env->stop = 0;
+        env->stopped = 1;
+        qemu_cond_signal(&qemu_pause_cond);
+    }
 }
 
 void qemu_cpu_kick(void *_env)
@@ -3837,6 +3870,53 @@ static void qemu_signal_lock(unsigned in
     qemu_mutex_unlock(&qemu_fair_mutex);
 }
 
+static int all_vcpus_paused(void)
+{
+    CPUState *penv = first_cpu;
+
+    while (penv) {
+        if (penv->stop)
+            return 0;
+        penv = (CPUState *)penv->next_cpu;
+    }
+
+    return 1;
+}
+
+static void pause_all_vcpus(void)
+{
+    CPUState *penv = first_cpu;
+
+    while (penv) {
+        penv->stop = 1;
+        qemu_thread_signal(penv->thread, SIGUSR1);
+        qemu_cpu_kick(penv);
+        penv = (CPUState *)penv->next_cpu;
+    }
+
+    while (!all_vcpus_paused()) {
+        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
+        penv = first_cpu;
+        while (penv) {
+            qemu_thread_signal(penv->thread, SIGUSR1);
+            penv = (CPUState *)penv->next_cpu;
+        }
+    }
+}
+
+static void resume_all_vcpus(void)
+{
+    CPUState *penv = first_cpu;
+
+    while (penv) {
+        penv->stop = 0;
+        penv->stopped = 0;
+        qemu_thread_signal(penv->thread, SIGUSR1);
+        qemu_cpu_kick(penv);
+        penv = (CPUState *)penv->next_cpu;
+    }
+}
+
 void main_loop_wait(int timeout)
 {
     IOHandlerRecord *ioh;
@@ -3999,7 +4079,9 @@ static void *cpu_main_loop(void *arg)
                     env->icount_decr.u16.low = decr;
                     env->icount_extra = count;
                 }
-                ret = cpu_exec(env);
+                ret = EXCP_HALTED;
+                if (cpu_can_run(env))
+                    ret = cpu_exec(env);
 #ifdef CONFIG_PROFILER
                 qemu_time += profile_getclock() - ti;
 #endif
@@ -4113,6 +4195,7 @@ void qemu_init_vcpu(void *_env)
 
 static void qemu_init_state(void)
 {
+    qemu_cond_init(&qemu_pause_cond);
     qemu_mutex_init(&qemu_fair_mutex);
     qemu_mutex_init(&qemu_global_mutex);
     qemu_mutex_lock(&qemu_global_mutex);
@@ -4131,14 +4214,18 @@ static void main_loop(void)
     while (1) {
         main_loop_wait(1000);
         if (qemu_shutdown_requested()) {
+            pause_all_vcpus();
             if (no_shutdown)
                 no_shutdown = 0;
             else
                     break;
         } else if (qemu_powerdown_requested())
             qemu_system_powerdown();
-        else if (qemu_reset_requested())
+        else if (qemu_reset_requested()) {
+            pause_all_vcpus();
             qemu_system_reset();
+            resume_all_vcpus();
+        }
     }
 }
 


* [Qemu-devel] [patch 09/10] qemu: handle vmstop from cpu context
  2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
                   ` (7 preceding siblings ...)
  2009-03-25 22:47 ` [Qemu-devel] [patch 08/10] qemu: pause and resume cpu threads Marcelo Tosatti
@ 2009-03-25 22:47 ` Marcelo Tosatti
  2009-03-25 22:47 ` [Qemu-devel] [patch 10/10] qemu: basic kvm iothread support Marcelo Tosatti
  2009-03-29 20:16 ` [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Blue Swirl
  10 siblings, 0 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 22:47 UTC (permalink / raw)
  To: qemu-devel

[-- Attachment #1: handle-vmstop-vcpu --]
[-- Type: text/plain, Size: 2303 bytes --]

There are certain cases where cpu context requests a vm stop, such as
-ENOSPC handling.

IMO it's simpler to handle vmstop only through the iothread.

Note there is a change in behaviour: the cpu thread which requested vm_stop
will now only actually stop when it exits back to cpu_main_loop. Further
damage could still occur between the vmstop request and reaching
cpu_main_loop.
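
Concretely, callers in cpu context (e.g. the block layer's -ENOSPC werror
path) keep calling vm_stop() unchanged; what changes is the dispatch inside
vm_stop(). The version below simply mirrors the diff, with comments added:

void vm_stop(int reason)
{
    QemuThread me;

    qemu_thread_self(&me);
    if (!qemu_thread_equal(&me, &io_thread)) {
        /* cpu context: queue the request for the iothread and make sure
         * this vcpu cannot re-enter cpu_exec() before it is handled */
        qemu_system_vmstop_request(reason);
        if (cpu_single_env)
            cpu_single_env->stop = 1;
        return;
    }
    __vm_stop(reason);      /* iothread context: stop immediately */
}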

Index: trunk/vl.c
===================================================================
--- trunk.orig/vl.c
+++ trunk/vl.c
@@ -3550,7 +3550,22 @@ void vm_start(void)
     }
 }
 
-void vm_stop(int reason)
+static int vmstop_requested;
+
+static int qemu_vmstop_requested(void)
+{
+    int r = vmstop_requested;
+    vmstop_requested = 0;
+    return r;
+}
+
+static void qemu_system_vmstop_request(int reason)
+{
+    vmstop_requested = reason;
+    main_loop_break();
+}
+
+static void __vm_stop(int reason)
 {
     if (vm_running) {
         cpu_disable_ticks();
@@ -3560,6 +3575,21 @@ void vm_stop(int reason)
     }
 }
 
+void vm_stop(int reason)
+{
+    QemuThread me;
+    qemu_thread_self(&me);
+
+    if (!qemu_thread_equal(&me, &io_thread)) {
+        qemu_system_vmstop_request(reason);
+        /* make sure we can't return to cpu_exec */
+        if (cpu_single_env)
+            cpu_single_env->stop = 1;
+        return;
+    }
+    __vm_stop(reason);
+}
+
 /* reset/shutdown handler */
 
 typedef struct QEMUResetEntry {
@@ -3757,6 +3787,8 @@ static int cpu_can_run(CPUState *env)
         return 0;
     if (reset_requested)
         return 0;
+    if (vmstop_requested)
+        return 0;
     return 1;
 }
 
@@ -4203,6 +4235,8 @@ static void qemu_init_state(void)
 
 static void main_loop(void)
 {
+    int r;
+
     qemu_thread_self(&io_thread);
     setup_iothread_fd();
 
@@ -4219,12 +4253,14 @@ static void main_loop(void)
                 no_shutdown = 0;
             else
                     break;
-        } else if (qemu_powerdown_requested())
+        } else if (qemu_powerdown_requested()) {
             qemu_system_powerdown();
-        else if (qemu_reset_requested()) {
+        } else if (qemu_reset_requested()) {
             pause_all_vcpus();
             qemu_system_reset();
             resume_all_vcpus();
+        } else if ((r = qemu_vmstop_requested())) {
+            vm_stop(r);
         }
     }
 }


* [Qemu-devel] [patch 10/10] qemu: basic kvm iothread support
  2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
                   ` (8 preceding siblings ...)
  2009-03-25 22:47 ` [Qemu-devel] [patch 09/10] qemu: handle vmstop from cpu context Marcelo Tosatti
@ 2009-03-25 22:47 ` Marcelo Tosatti
  2009-03-29 20:16 ` [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Blue Swirl
  10 siblings, 0 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 22:47 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

[-- Attachment #1: kvm --]
[-- Type: text/plain, Size: 4154 bytes --]

Allow the iothread to run while vcpu is in guest mode. To be tuned
with SMP support.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
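
The effect is that qemu_global_mutex is held everywhere except while the
vcpu actually sits in KVM_RUN. A simplified sketch of the resulting vcpu
loop (the authoritative sequence, including kvm_arch_pre_run()/
kvm_arch_post_run() and exit handling, is in the kvm-all.c hunk below; the
loop condition here is an assumption):

static int toy_kvm_vcpu_loop(CPUState *env)
{
    int ret;

    do {
        kvm_pre_run(env);    /* clears cpu_single_env, drops qemu_global_mutex */
        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);   /* guest runs; iothread stays live */
        kvm_post_run(env);   /* retakes qemu_global_mutex, restores cpu_single_env */
        /* exit reasons are then handled with the lock held */
    } while (ret == 0 && !env->exit_request);

    return ret;
}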

Index: trunk/kvm-all.c
===================================================================
--- trunk.orig/kvm-all.c
+++ trunk/kvm-all.c
@@ -451,6 +451,7 @@ int kvm_cpu_exec(CPUState *env)
 
     do {
         kvm_arch_pre_run(env, run);
+        kvm_pre_run(env);
 
         if (env->exit_request) {
             dprintf("interrupt exit requested\n");
@@ -459,6 +460,7 @@ int kvm_cpu_exec(CPUState *env)
         }
 
         ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
+        kvm_post_run(env);
         kvm_arch_post_run(env, run);
 
         if (ret == -EINTR || ret == -EAGAIN) {
Index: trunk/kvm.h
===================================================================
--- trunk.orig/kvm.h
+++ trunk/kvm.h
@@ -27,6 +27,10 @@ extern int kvm_allowed;
 
 struct kvm_run;
 
+/* main loop interface, vl.c */
+void kvm_pre_run(CPUState *env);
+void kvm_post_run(CPUState *env);
+
 /* external API */
 
 int kvm_init(int smp_cpus);
Index: trunk/target-i386/helper.c
===================================================================
--- trunk.orig/target-i386/helper.c
+++ trunk/target-i386/helper.c
@@ -1668,9 +1668,6 @@ CPUX86State *cpu_x86_init(const char *cp
 #ifdef USE_KQEMU
     kqemu_init(env);
 #endif
-    if (kvm_enabled())
-        kvm_init_vcpu(env);
-
     qemu_init_vcpu(env);
 
     return env;
Index: trunk/target-ppc/helper.c
===================================================================
--- trunk.orig/target-ppc/helper.c
+++ trunk/target-ppc/helper.c
@@ -2829,8 +2829,6 @@ CPUPPCState *cpu_ppc_init (const char *c
     cpu_ppc_register_internal(env, def);
     cpu_ppc_reset(env);
 
-    if (kvm_enabled())
-        kvm_init_vcpu(env);
     qemu_init_vcpu(env);
 
     return env;
Index: trunk/vl.c
===================================================================
--- trunk.orig/vl.c
+++ trunk/vl.c
@@ -3842,6 +3842,8 @@ void qemu_cpu_kick(void *_env)
 {
     CPUState *env = _env;
     qemu_cond_broadcast(env->halt_cond);
+    if (kvm_enabled())
+        qemu_thread_signal(env->thread, SIGUSR1);
 }
 
 int qemu_cpu_self(void *env)
@@ -3902,6 +3904,16 @@ static void qemu_signal_lock(unsigned in
     qemu_mutex_unlock(&qemu_fair_mutex);
 }
 
+static void qemu_mutex_lock_iothread(void)
+{
+    if (kvm_enabled()) {
+        qemu_mutex_lock(&qemu_fair_mutex);
+        qemu_mutex_lock(&qemu_global_mutex);
+        qemu_mutex_unlock(&qemu_fair_mutex);
+    } else
+        qemu_signal_lock(100);
+}
+
 static int all_vcpus_paused(void)
 {
     CPUState *penv = first_cpu;
@@ -3998,7 +4010,7 @@ void main_loop_wait(int timeout)
      */
     qemu_mutex_unlock(&qemu_global_mutex);
     ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
-    qemu_signal_lock(100);
+    qemu_mutex_lock_iothread();
     if (ret > 0) {
         IOHandlerRecord **pioh;
 
@@ -4206,9 +4218,8 @@ static void *cpu_main_loop(void *arg)
     return NULL;
 }
 
-void qemu_init_vcpu(void *_env)
+static void tcg_init_vcpu(CPUState *env)
 {
-    CPUState *env = _env;
     /* share a single thread for all cpus with TCG */
     if (!tcg_cpu_thread) {
         env->thread = qemu_mallocz(sizeof(QemuThread));
@@ -4225,6 +4236,39 @@ void qemu_init_vcpu(void *_env)
     }
 }
 
+static void kvm_start_vcpu(CPUState *env)
+{
+    kvm_init_vcpu(env);
+    env->thread = qemu_mallocz(sizeof(QemuThread));
+    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
+    qemu_cond_init(env->halt_cond);
+    qemu_thread_create(env->thread, cpu_main_loop, env);
+    while (env->created == 0)
+        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
+}
+
+void kvm_pre_run(CPUState *env)
+{
+    cpu_single_env = NULL;
+    qemu_mutex_unlock(&qemu_global_mutex);
+}
+
+void kvm_post_run(CPUState *env)
+{
+    qemu_mutex_lock(&qemu_global_mutex);
+    cpu_single_env = env;
+}
+
+void qemu_init_vcpu(void *_env)
+{
+    CPUState *env = _env;
+
+    if (kvm_enabled())
+        kvm_start_vcpu(env);
+    else
+        tcg_init_vcpu(env);
+}
+
 static void qemu_init_state(void)
 {
     qemu_cond_init(&qemu_pause_cond);

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 01/10] qemu: create helper for event notification
  2009-03-25 22:47 ` [Qemu-devel] [patch 01/10] qemu: create helper for event notification Marcelo Tosatti
@ 2009-03-25 23:18   ` Glauber Costa
  2009-03-25 23:27     ` Marcelo Tosatti
  0 siblings, 1 reply; 31+ messages in thread
From: Glauber Costa @ 2009-03-25 23:18 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

> +void qemu_notify_event(void)
> +{
> +    CPUState *env = cpu_single_env;
> +
> +    if (env) {
> +        cpu_exit(env);
> +#ifdef USE_KQEMU
> +        if (env->kqemu_enabled)
> +            kqemu_cpu_interrupt(env);
> +#endif
> +     }
>  }

Have you tested this with kqemu? It will fire kqemu interrupts in a
lot of circumstances it didn't before. That said, I don't even think
kqemu is that important, but if it's in the tree, we don't want to
introduce regressions. I'd suggest passing a flag indicating the type
of event we're notifying. It's easy to foresee other uses for that as
well.
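
Something along these lines, purely as a sketch (the enum and the
event names below are made up for illustration, they are not in the
patch):

typedef enum {
    QEMU_EVENT_TIMER,   /* alarm timer expired */
    QEMU_EVENT_IO,      /* fd/DMA/service work pending */
} QemuEventType;

void qemu_notify_event(QemuEventType type)
{
    CPUState *env = cpu_single_env;

    if (!env)
        return;
    cpu_exit(env);
#ifdef USE_KQEMU
    /* only kick kqemu for the event types that did so before */
    if (type == QEMU_EVENT_TIMER && env->kqemu_enabled)
        kqemu_cpu_interrupt(env);
#endif
}

Callers would then say e.g. qemu_notify_event(QEMU_EVENT_IO) from
qemu_service_io(), and the kqemu behaviour stays as it was.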



-- 
Glauber  Costa.
"Free as in Freedom"
http://glommer.net

"The less confident you are, the more serious you have to act."

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 02/10] qemu: mutex/thread/cond wrappers
  2009-03-25 22:47 ` [Qemu-devel] [patch 02/10] qemu: mutex/thread/cond wrappers Marcelo Tosatti
@ 2009-03-25 23:24   ` Glauber Costa
  2009-03-25 23:29     ` Marcelo Tosatti
  2009-03-26 11:01   ` malc
  1 sibling, 1 reply; 31+ messages in thread
From: Glauber Costa @ 2009-03-25 23:24 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

On Wed, Mar 25, 2009 at 7:47 PM, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
>
> Index: trunk/qemu-thread.c
> ===================================================================
> --- /dev/null
> +++ trunk/qemu-thread.c
I think this all fits better as static inlines defined in the
qemu-thread.h file. It'll surely generate better code.
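
Roughly this kind of thing, as a sketch that reuses the QemuMutex and
error_exit() names from the patch (it assumes error_exit() is made
visible to the header, or moved into it):

/* qemu-thread.h */
#include <pthread.h>

static inline void qemu_mutex_lock(QemuMutex *mutex)
{
    if (pthread_mutex_lock(&mutex->lock))
        error_exit(__func__);
}

static inline void qemu_mutex_unlock(QemuMutex *mutex)
{
    if (pthread_mutex_unlock(&mutex->lock))
        error_exit(__func__);
}

The trivial wrappers then get inlined at the call sites and
qemu-thread.c only keeps the non-trivial helpers.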

> +int qemu_mutex_timedlock(QemuMutex *mutex, uint64_t msecs)
> +{
> +    int r;
> +    struct timespec ts;
> +
> +    clock_gettime(CLOCK_REALTIME, &ts);
> +    timespec_add_ms(&ts, msecs);
> +
> +    r = pthread_mutex_timedlock(&mutex->lock, &ts);
> +    if (r && r != ETIMEDOUT)
> +        error_exit(__func__);
> +    return r;
> +}

Do we have in-tree users of that? Although it is a matter of personal
taste, I don't like timed locks. So if we don't have any immediate
users, we might as well remove it for the sake of simplicity.


-- 
Glauber  Costa.
"Free as in Freedom"
http://glommer.net

"The less confident you are, the more serious you have to act."

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 03/10] qemu: per-arch cpu_has_work
  2009-03-25 22:47 ` [Qemu-devel] [patch 03/10] qemu: per-arch cpu_has_work Marcelo Tosatti
@ 2009-03-25 23:26   ` Glauber Costa
  2009-03-28 18:14   ` Blue Swirl
  1 sibling, 0 replies; 31+ messages in thread
From: Glauber Costa @ 2009-03-25 23:26 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

On Wed, Mar 25, 2009 at 7:47 PM, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
>
> Index: trunk/target-alpha/exec.h
> ===================================================================
> --- trunk.orig/target-alpha/exec.h
> +++ trunk/target-alpha/exec.h

From my POV, this patch is okay, and can go in regardless of the io-thread work.



-- 
Glauber  Costa.
"Free as in Freedom"
http://glommer.net

"The less confident you are, the more serious you have to act."

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 01/10] qemu: create helper for event notification
  2009-03-25 23:18   ` Glauber Costa
@ 2009-03-25 23:27     ` Marcelo Tosatti
  0 siblings, 0 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 23:27 UTC (permalink / raw)
  To: Glauber Costa; +Cc: qemu-devel

On Wed, Mar 25, 2009 at 08:18:58PM -0300, Glauber Costa wrote:
> > +void qemu_notify_event(void)
> > +{
> > +    CPUState *env = cpu_single_env;
> > +
> > +    if (env) {
> > +        cpu_exit(env);
> > +#ifdef USE_KQEMU
> > +        if (env->kqemu_enabled)
> > +            kqemu_cpu_interrupt(env);
> > +#endif
> > +     }
> >  }
> 
> Have you tested this with kqemu? 

Nope.

> It will fire kqemu interrupts in a lot of circumstances it didn't
> before. That said, I don't even think kqemu is that important, but if
> it's in the tree, we don't want to introduce regressions. I'd suggest
> passing a flag indicating the type of event we're notifying. It's easy
> to foresee other uses for that as well.

qemu_notify_event() basically means "process events handled by
main_loop_wait ASAP". Maybe it needs a better name.

I'll make sure there's no serious regression with kqemu.

There are things like

                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif

Which needs some rethinking.

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 02/10] qemu: mutex/thread/cond wrappers
  2009-03-25 23:24   ` Glauber Costa
@ 2009-03-25 23:29     ` Marcelo Tosatti
  0 siblings, 0 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-25 23:29 UTC (permalink / raw)
  To: Glauber Costa; +Cc: qemu-devel

On Wed, Mar 25, 2009 at 08:24:51PM -0300, Glauber Costa wrote:
> On Wed, Mar 25, 2009 at 7:47 PM, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> > Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
> >
> > Index: trunk/qemu-thread.c
> > ===================================================================
> > --- /dev/null
> > +++ trunk/qemu-thread.c
> I think it all fits better as static inline defined in qemu-thread.h file.
> It'll surely generate better code.

Sure, can be done.

> > +int qemu_mutex_timedlock(QemuMutex *mutex, uint64_t msecs)
> > +{
> > +    int r;
> > +    struct timespec ts;
> > +
> > +    clock_gettime(CLOCK_REALTIME, &ts);
> > +    timespec_add_ms(&ts, msecs);
> > +
> > +    r = pthread_mutex_timedlock(&mutex->lock, &ts);
> > +    if (r && r != ETIMEDOUT)
> > +        error_exit(__func__);
> > +    return r;
> > +}
> 
> Do we have in-tree users of that? Although it is a matter of personal
> taste, I don't like timed locks. So if we don't have any immediate
> users, we might as well remove it for the sake of simplicity.

There are. See patch 5.

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break
  2009-03-25 22:47 ` [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break Marcelo Tosatti
@ 2009-03-26  1:24   ` Glauber Costa
  2009-03-26 12:27   ` Jamie Lokier
  1 sibling, 0 replies; 31+ messages in thread
From: Glauber Costa @ 2009-03-26  1:24 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

> +static void setup_iothread_fd(void)
> +{
> +    int fds[2];
> +
> +    if (pipe(fds) == -1) {
> +        fprintf(stderr, "failed to create iothread pipe");
qemu_log() ?

in general, looks good



-- 
Glauber  Costa.
"Free as in Freedom"
http://glommer.net

"The less confident you are, the more serious you have to act."

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 06/10] qemu: per-cpu thread information
  2009-03-25 22:47 ` [Qemu-devel] [patch 06/10] qemu: per-cpu thread information Marcelo Tosatti
@ 2009-03-26  1:35   ` Glauber Costa
  2009-03-26  2:10     ` Marcelo Tosatti
  0 siblings, 1 reply; 31+ messages in thread
From: Glauber Costa @ 2009-03-26  1:35 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

On Wed, Mar 25, 2009 at 7:47 PM, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> Move per-cpu thread information to CPUState. Initialize through per-arch
> cpu_init.

What prevents it from happening in common code?

-- 
Glauber  Costa.
"Free as in Freedom"
http://glommer.net

"The less confident you are, the more serious you have to act."

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 06/10] qemu: per-cpu thread information
  2009-03-26  1:35   ` Glauber Costa
@ 2009-03-26  2:10     ` Marcelo Tosatti
  2009-03-26  2:26       ` Glauber Costa
  0 siblings, 1 reply; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-26  2:10 UTC (permalink / raw)
  To: Glauber Costa; +Cc: qemu-devel

On Wed, Mar 25, 2009 at 10:35:04PM -0300, Glauber Costa wrote:
> On Wed, Mar 25, 2009 at 7:47 PM, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> > Move per-cpu thread information to CPUState. Initialize through per-arch
> > cpu_init.
> 
> What prevents it from happening in common code?

To be accessed in non-cpu context, you mean? Or to be accessed from
linux-user context?

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 06/10] qemu: per-cpu thread information
  2009-03-26  2:10     ` Marcelo Tosatti
@ 2009-03-26  2:26       ` Glauber Costa
  2009-03-26  2:41         ` Marcelo Tosatti
  0 siblings, 1 reply; 31+ messages in thread
From: Glauber Costa @ 2009-03-26  2:26 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: qemu-devel

On Wed, Mar 25, 2009 at 11:10 PM, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> On Wed, Mar 25, 2009 at 10:35:04PM -0300, Glauber Costa wrote:
>> On Wed, Mar 25, 2009 at 7:47 PM, Marcelo Tosatti <mtosatti@redhat.com> wrote:
>> > Move per-cpu thread information to CPUState. Initialize through per-arch
>> > cpu_init.
>>
>> What prevents it from happening in common code?
>
> To be accessed in non-cpu context, you mean? Or to be accessed from
> linux-user context?
non-per-arch



-- 
Glauber  Costa.
"Free as in Freedom"
http://glommer.net

"The less confident you are, the more serious you have to act."

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 06/10] qemu: per-cpu thread information
  2009-03-26  2:26       ` Glauber Costa
@ 2009-03-26  2:41         ` Marcelo Tosatti
  0 siblings, 0 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-03-26  2:41 UTC (permalink / raw)
  To: Glauber Costa; +Cc: qemu-devel

On Wed, Mar 25, 2009 at 11:26:36PM -0300, Glauber Costa wrote:
> On Wed, Mar 25, 2009 at 11:10 PM, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> > On Wed, Mar 25, 2009 at 10:35:04PM -0300, Glauber Costa wrote:
> >> On Wed, Mar 25, 2009 at 7:47 PM, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> >> > Move per-cpu thread information to CPUState. Initialize through per-arch
> >> > cpu_init.
> >>
> >> What prevents it from happening in common code?
> >
> > To be accessed in non-cpu context, you mean? Or to be accessed from
> > linux-user context?
> non-per-arch

Huh?

The information is accessed from common code, vl.c. I don't understand what the problem is.

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 02/10] qemu: mutex/thread/cond wrappers
  2009-03-25 22:47 ` [Qemu-devel] [patch 02/10] qemu: mutex/thread/cond wrappers Marcelo Tosatti
  2009-03-25 23:24   ` Glauber Costa
@ 2009-03-26 11:01   ` malc
  1 sibling, 0 replies; 31+ messages in thread
From: malc @ 2009-03-26 11:01 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

On Wed, 25 Mar 2009, Marcelo Tosatti wrote:

> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
> 
> Index: trunk/qemu-thread.c
> ===================================================================
> --- /dev/null
> +++ trunk/qemu-thread.c
> @@ -0,0 +1,123 @@
> +#include <stdlib.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <time.h>
> +#include <signal.h>
> +#include <stdint.h>
> +#include "qemu-thread.h"
> +
> +static void error_exit(const char *msg)
> +{
> +    perror(msg);
> +    exit(1);
> +}
> +
> +void qemu_mutex_init(QemuMutex *mutex)
> +{
> +    if (pthread_mutex_init(&mutex->lock, NULL))
> +        error_exit(__func__);
> +}

This is wrong: pthread functions return the error code and do not
set errno, so using perror() here is meaningless.
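
Something like this instead, just a sketch (the signature change to
pass the return value through is the point; it needs <string.h> for
strerror()):

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    exit(1);
}

void qemu_mutex_init(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err)
        error_exit(err, __func__);
}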

[..snip..]

-- 
mailto:av1474@comtv.ru

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break
  2009-03-25 22:47 ` [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break Marcelo Tosatti
  2009-03-26  1:24   ` Glauber Costa
@ 2009-03-26 12:27   ` Jamie Lokier
  2009-04-02 23:36     ` Marcelo Tosatti
  1 sibling, 1 reply; 31+ messages in thread
From: Jamie Lokier @ 2009-03-26 12:27 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

Marcelo Tosatti wrote:
> Use a pipe to signal pending work for the iothread.

> +void main_loop_break(void)
> +{
> +    uint64_t value = 1;
> +    char buffer[8];
> +    size_t offset = 0;
> +
> +    if (io_thread_fd == -1)
> +        return;
> +
> +    memcpy(buffer, &value, sizeof(value));
> +
> +    while (offset < 8) {
> +        ssize_t len;
> +
> +        len = write(io_thread_fd, buffer + offset, 8 - offset);
> +        if (len == -1 && errno == EINTR)
> +            continue;
> +
> +        if (len <= 0)
> +            break;
> +
> +        offset += len;
> +    }
> +
> +    if (offset != 8)
> +        fprintf(stderr, "failed to notify io thread\n");

1: Why do you write 8 bytes instead of 1 byte?  If you're thinking of
   passing a value at some point, you could change it later when it's
   needed.  But beware that requiring the pipe to hold all values
   written is what made Netscape 4 lock up on too-new Linux kernels
   some 10 years ago :-)

2: Do you know that writes <= PIPE_BUF are atomic so you don't need
   the offset calculation?

> +/* Used to break IO thread out of select */
> +static void io_thread_wakeup(void *opaque)
> +{
> +    int fd = (unsigned long)opaque;
> +    char buffer[8];
> +    size_t offset = 0;
> +
> +    while (offset < 8) {
> +        ssize_t len;
> +
> +        len = read(fd, buffer + offset, 8 - offset);
> +        if (len == -1 && errno == EINTR)
> +            continue;
> +
> +        if (len <= 0)
> +            break;
> +
> +        offset += len;
> +    }
> +}

You should read until the pipe is empty, in case more than one signal
was raised and called main_loop_break() between calls to
io_thread_wait().

Since reads <= PIPE_BUF are atomic too, the easiest way to do that is
try to read at least one more byte than you wrote per
main_loop_break() call, and if you don't get that many, you've emptied
the pipe.
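
I.e. something like this untested sketch, keeping the names from the
patch and assuming the read side is made non-blocking as suggested
further down:

/* Used to break the IO thread out of select(): drain the pipe */
static void io_thread_wakeup(void *opaque)
{
    int fd = (unsigned long)opaque;
    char buffer[64];    /* larger than one main_loop_break() write */
    ssize_t len;

    do {
        len = read(fd, buffer, sizeof(buffer));
    } while (len == (ssize_t)sizeof(buffer) ||
             (len == -1 && errno == EINTR));
    /* a short read (or EAGAIN) means the pipe is now empty,
       which is all we care about */
}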

> +static void setup_iothread_fd(void)
> +{
> +    int fds[2];
> +
> +    if (pipe(fds) == -1) {
> +        fprintf(stderr, "failed to create iothread pipe");
> +        exit(0);
> +    }
> +
> +    qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
> +                         (void *)(unsigned long)fds[0]);
> +    io_thread_fd = fds[1];
> +}

To avoid deadlock in perverse conditions where lots of signals call
main_loop_break() enough to fill the pipe before it's read, set the
write side non-blocking.

To use the trick of reading more bytes than you expect to detect EOF
without an extra read, set the read side non-blocking too.

-- Jamie

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 03/10] qemu: per-arch cpu_has_work
  2009-03-25 22:47 ` [Qemu-devel] [patch 03/10] qemu: per-arch cpu_has_work Marcelo Tosatti
  2009-03-25 23:26   ` Glauber Costa
@ 2009-03-28 18:14   ` Blue Swirl
  2009-03-29 20:13     ` Blue Swirl
  1 sibling, 1 reply; 31+ messages in thread
From: Blue Swirl @ 2009-03-28 18:14 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

On 3/26/09, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

While the patch looks safe, it breaks Sparc32. I think you have mixed
env1, s and env (which is defined as a fixed host register).

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 03/10] qemu: per-arch cpu_has_work
  2009-03-28 18:14   ` Blue Swirl
@ 2009-03-29 20:13     ` Blue Swirl
  2009-04-02 23:42       ` Marcelo Tosatti
  0 siblings, 1 reply; 31+ messages in thread
From: Blue Swirl @ 2009-03-29 20:13 UTC (permalink / raw)
  To: qemu-devel; +Cc: Marcelo Tosatti

[-- Attachment #1: Type: text/plain, Size: 331 bytes --]

On 3/28/09, Blue Swirl <blauwirbel@gmail.com> wrote:
> On 3/26/09, Marcelo Tosatti <mtosatti@redhat.com> wrote:
>  > Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
>
>
> While the patch looks safe, it breaks Sparc32. I think you have mixed
>  env1, s and env (which is defined as a fixed host register).

This version works.

[-- Attachment #2: patch_03_10_qemu__per-arch_cpu_has_work.diff --]
[-- Type: plain/text, Size: 7274 bytes --]

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 00/10] iothread (candidate for inclusion)
  2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
                   ` (9 preceding siblings ...)
  2009-03-25 22:47 ` [Qemu-devel] [patch 10/10] qemu: basic kvm iothread support Marcelo Tosatti
@ 2009-03-29 20:16 ` Blue Swirl
  10 siblings, 0 replies; 31+ messages in thread
From: Blue Swirl @ 2009-03-29 20:16 UTC (permalink / raw)
  To: qemu-devel

On 3/26/09, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> This version is simplified thanks to the removal of CPU_INTERRUPT_EXIT.
>
>  There are two pending issues: vm_stop() from ENOSPC handling and GDB contexes.
>  IMO those can be worked while testing is performed by developers.
>
>  KVM SMP support is missing, but most of the infra required is present, so
>  it should be relatively simple.
>
>  There's also lots of room for TCG perf improvements.
>
>  Tested with target-x86_64 and target-ppc.

With the updated patch #3, Sparc32 also works.

I think I see a minor performance gain on a dual core host.

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break
  2009-03-26 12:27   ` Jamie Lokier
@ 2009-04-02 23:36     ` Marcelo Tosatti
  2009-04-02 23:52       ` Anthony Liguori
  0 siblings, 1 reply; 31+ messages in thread
From: Marcelo Tosatti @ 2009-04-02 23:36 UTC (permalink / raw)
  To: Jamie Lokier; +Cc: qemu-devel

On Thu, Mar 26, 2009 at 12:27:17PM +0000, Jamie Lokier wrote:
> Marcelo Tosatti wrote:
> > Use a pipe to signal pending work for the iothread.
> 
> > +void main_loop_break(void)
> > +{
> > +    uint64_t value = 1;
> > +    char buffer[8];
> > +    size_t offset = 0;
> > +
> > +    if (io_thread_fd == -1)
> > +        return;
> > +
> > +    memcpy(buffer, &value, sizeof(value));
> > +
> > +    while (offset < 8) {
> > +        ssize_t len;
> > +
> > +        len = write(io_thread_fd, buffer + offset, 8 - offset);
> > +        if (len == -1 && errno == EINTR)
> > +            continue;
> > +
> > +        if (len <= 0)
> > +            break;
> > +
> > +        offset += len;
> > +    }
> > +
> > +    if (offset != 8)
> > +        fprintf(stderr, "failed to notify io thread\n");
> 
> 1: Why do you write 8 bytes instead of 1 byte?  If you're thinking of
>    passing a value at some point, you could change it later when it's
>    needed.  But beware that requiring the pipe to hold all values
>    written is what made Netscape 4 lock up on too-new Linux kernels
>    some 10 years ago :-)
> 
> 2: Do you know that writes <= PIPE_BUF are atomic so you don't need
>    the offset calculation?

I didn't know. Anthony wrote this code, so I'll let him comment.

> > +/* Used to break IO thread out of select */
> > +static void io_thread_wakeup(void *opaque)
> > +{
> > +    int fd = (unsigned long)opaque;
> > +    char buffer[8];
> > +    size_t offset = 0;
> > +
> > +    while (offset < 8) {
> > +        ssize_t len;
> > +
> > +        len = read(fd, buffer + offset, 8 - offset);
> > +        if (len == -1 && errno == EINTR)
> > +            continue;
> > +
> > +        if (len <= 0)
> > +            break;
> > +
> > +        offset += len;
> > +    }
> > +}
> 
> You should read until the pipe is empty, in case more than one signal
> was raised and called main_loop_break() between calls to
> io_thread_wait().
> 
> Since reads <= PIPE_BUF are atomic too, the easiest way to do that is
> try to read at least one more byte than you wrote per
> main_loop_break() call, and if you don't get that many, you've emptied
> the pipe.
> 
> > +static void setup_iothread_fd(void)
> > +{
> > +    int fds[2];
> > +
> > +    if (pipe(fds) == -1) {
> > +        fprintf(stderr, "failed to create iothread pipe");
> > +        exit(0);
> > +    }
> > +
> > +    qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
> > +                         (void *)(unsigned long)fds[0]);
> > +    io_thread_fd = fds[1];
> > +}
> 
> To avoid deadlock in perverse conditions where lots of signals call
> main_loop_break() enough to fill the pipe before it's read, set the
> write side non-blocking.

The pipe is set nonblocking by a later patch. Perhaps the hunk should be 
part of this patch.

> To use the trick of reading more bytes than you expect to detect EOF
> without an extra read, set the read side non-blocking too.

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 03/10] qemu: per-arch cpu_has_work
  2009-03-29 20:13     ` Blue Swirl
@ 2009-04-02 23:42       ` Marcelo Tosatti
  0 siblings, 0 replies; 31+ messages in thread
From: Marcelo Tosatti @ 2009-04-02 23:42 UTC (permalink / raw)
  To: qemu-devel, Blue Swirl

On Sun, Mar 29, 2009 at 11:13:04PM +0300, Blue Swirl wrote:
> On 3/28/09, Blue Swirl <blauwirbel@gmail.com> wrote:
> > On 3/26/09, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> >  > Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
> >
> >
> > While the patch looks safe, it breaks Sparc32. I think you have mixed
> >  env1, s and env (which is defined as a fixed host register).
> 
> This version works.

The new patchset includes this version. Thanks.

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break
  2009-04-02 23:36     ` Marcelo Tosatti
@ 2009-04-02 23:52       ` Anthony Liguori
  2009-04-03 14:08         ` Markus Armbruster
  2009-04-07  3:23         ` Jamie Lokier
  0 siblings, 2 replies; 31+ messages in thread
From: Anthony Liguori @ 2009-04-02 23:52 UTC (permalink / raw)
  To: qemu-devel

Marcelo Tosatti wrote:
> I didn't know. Anthony wrote this code, so I'll let him comment.
>   
Is this guaranteed by POSIX and reliable across Unixes?  In general, I
think it's better to be conservative with this sort of thing.

Regards,

Anthony Liguori

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break
  2009-04-02 23:52       ` Anthony Liguori
@ 2009-04-03 14:08         ` Markus Armbruster
  2009-04-07  3:23         ` Jamie Lokier
  1 sibling, 0 replies; 31+ messages in thread
From: Markus Armbruster @ 2009-04-03 14:08 UTC (permalink / raw)
  To: qemu-devel

Anthony Liguori <anthony@codemonkey.ws> writes:

> Marcelo Tosatti wrote:
>> I didn't know. Anthony wrote this code, so I'll let him comment.
>>   
> Is this guaranteed by POSIX and reliable across Unixes?  In general, I
> think it's better to be conservative with this sort of thing.

For what it's worth, POSIX.1-2001 guarantees atomicity of up to PIPE_BUF
bytes, and PIPE_BUF must be at least 512.  See pipe(7).  More modern
chapter & verse is online:

http://www.opengroup.org/onlinepubs/009695399/functions/write.html
http://www.opengroup.org/onlinepubs/009695399/basedefs/limits.h.html

A note on select() bugs related to PIPE_BUF in some ancient crap:

http://cr.yp.to/docs/unixport.html

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break
  2009-04-02 23:52       ` Anthony Liguori
  2009-04-03 14:08         ` Markus Armbruster
@ 2009-04-07  3:23         ` Jamie Lokier
  1 sibling, 0 replies; 31+ messages in thread
From: Jamie Lokier @ 2009-04-07  3:23 UTC (permalink / raw)
  To: Anthony Liguori; +Cc: qemu-devel

Anthony Liguori wrote:
> Marcelo Tosatti wrote:
> >I didn't know. Anthony wrote this code, so I'll let him comment.
> >  
> Is this guaranteed by POSIX and reliable across Unixes?  In general, I
> think it's better to be conservative with this sort of thing.

None of the changes I suggested depend on pipe atomicity.
(My mistake for bringing it up.)

As long as you're only writing 1 byte, write atomicity isn't relevant.
(If the current code actually did anything with the 8-byte chunks
it writes, _that_ would depend on atomicity.)

The trick of reading 2 bytes and assuming the pipe is emptied if you
only get 1 byte doesn't depend on read atomicity, because in the
unlikely event there is another byte, that's ok: the select() will
return immediately and you'll read again anyway.  That can happen even
with atomicity due to parallelism.  So reading 2 bytes is just a
heuristic trick to avoid an unnecessary system call in most cases.

So here's the signal-safe, thread-safe self-pipe trick as I see it (a
rough sketch follows the list):

   - Set both ends of the pipe to non-blocking.

   - Write 1 byte in the signal handler, other threads, etc.  Don't
     worry if you get EAGAIN; it just means the pipe is full, which is
     fine.
     (As an optimisation you can set a flag and avoid writing a second
     time when you know the pipe is non-empty.  But to do this right,
     you have to be careful with memory ordering etc.  Maybe this is
     not worth it.)

   - Read 2 or more bytes in the select() handler for the read side.
     If you get all the bytes you asked for, loop doing it again.  The
     loop is to drain the pipe, avoiding spurious select() wakeups.
     When you get fewer bytes than you asked for, the pipe is probably
     empty, but if it isn't that's fine.
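
As an untested sketch with the names from the patch (error handling
is mostly elided; the fcntl() setup and the 1-byte write are the
point):

static void setup_iothread_fd(void)
{
    int fds[2];

    if (pipe(fds) == -1) {
        fprintf(stderr, "failed to create iothread pipe\n");
        exit(1);
    }
    /* both ends non-blocking, per the list above */
    fcntl(fds[0], F_SETFL, fcntl(fds[0], F_GETFL) | O_NONBLOCK);
    fcntl(fds[1], F_SETFL, fcntl(fds[1], F_GETFL) | O_NONBLOCK);

    qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
                         (void *)(unsigned long)fds[0]);
    io_thread_fd = fds[1];
}

void main_loop_break(void)
{
    char byte = 0;
    ssize_t len;

    if (io_thread_fd == -1)
        return;
    /* async-signal-safe; EAGAIN just means the pipe is already
       full, which is fine since the reader will wake up anyway */
    do {
        len = write(io_thread_fd, &byte, 1);
    } while (len == -1 && errno == EINTR);
}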

-- Jamie

^ permalink raw reply	[flat|nested] 31+ messages in thread

end of thread

Thread overview: 31+ messages
2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 01/10] qemu: create helper for event notification Marcelo Tosatti
2009-03-25 23:18   ` Glauber Costa
2009-03-25 23:27     ` Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 02/10] qemu: mutex/thread/cond wrappers Marcelo Tosatti
2009-03-25 23:24   ` Glauber Costa
2009-03-25 23:29     ` Marcelo Tosatti
2009-03-26 11:01   ` malc
2009-03-25 22:47 ` [Qemu-devel] [patch 03/10] qemu: per-arch cpu_has_work Marcelo Tosatti
2009-03-25 23:26   ` Glauber Costa
2009-03-28 18:14   ` Blue Swirl
2009-03-29 20:13     ` Blue Swirl
2009-04-02 23:42       ` Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break Marcelo Tosatti
2009-03-26  1:24   ` Glauber Costa
2009-03-26 12:27   ` Jamie Lokier
2009-04-02 23:36     ` Marcelo Tosatti
2009-04-02 23:52       ` Anthony Liguori
2009-04-03 14:08         ` Markus Armbruster
2009-04-07  3:23         ` Jamie Lokier
2009-03-25 22:47 ` [Qemu-devel] [patch 05/10] qemu: separate thread for io Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 06/10] qemu: per-cpu thread information Marcelo Tosatti
2009-03-26  1:35   ` Glauber Costa
2009-03-26  2:10     ` Marcelo Tosatti
2009-03-26  2:26       ` Glauber Costa
2009-03-26  2:41         ` Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 07/10] qemu: handle reset/poweroff/shutdown in iothread Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 08/10] qemu: pause and resume cpu threads Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 09/10] qemu: handle vmstop from cpu context Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 10/10] qemu: basic kvm iothread support Marcelo Tosatti
2009-03-29 20:16 ` [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Blue Swirl
