qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
* [Qemu-devel] [patch 0/2] RFC: separate thread for IO
@ 2009-03-11 16:16 Marcelo Tosatti
  2009-03-11 16:16 ` [Qemu-devel] [patch 1/2] qemu: sem/thread helpers Marcelo Tosatti
  2009-03-11 16:16 ` [Qemu-devel] [patch 2/2] qemu: separate thread for io Marcelo Tosatti
  0 siblings, 2 replies; 12+ messages in thread
From: Marcelo Tosatti @ 2009-03-11 16:16 UTC (permalink / raw)
  To: qemu-devel; +Cc: Anthony Liguori

Introduce a separate thread for handling IO. Not intended for
inclusion, but should be something for people to comment on.

-- 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [Qemu-devel] [patch 1/2] qemu: sem/thread helpers
  2009-03-11 16:16 [Qemu-devel] [patch 0/2] RFC: separate thread for IO Marcelo Tosatti
@ 2009-03-11 16:16 ` Marcelo Tosatti
  2009-03-11 16:33   ` [Qemu-devel] " Anthony Liguori
                     ` (2 more replies)
  2009-03-11 16:16 ` [Qemu-devel] [patch 2/2] qemu: separate thread for io Marcelo Tosatti
  1 sibling, 3 replies; 12+ messages in thread
From: Marcelo Tosatti @ 2009-03-11 16:16 UTC (permalink / raw)
  To: qemu-devel; +Cc: Anthony Liguori

[-- Attachment #1: iothread-mutex --]
[-- Type: text/plain, Size: 4301 bytes --]

Index: qemu/Makefile.target
===================================================================
--- qemu.orig/Makefile.target
+++ qemu/Makefile.target
@@ -500,7 +500,7 @@ endif #CONFIG_BSD_USER
 # System emulator target
 ifndef CONFIG_USER_ONLY
 
-OBJS=vl.o osdep.o monitor.o pci.o loader.o isa_mmio.o machine.o
+OBJS=vl.o qemu-thread.o osdep.o monitor.o pci.o loader.o isa_mmio.o machine.o
 # virtio has to be here due to weird dependency between PCI and virtio-net.
 # need to fix this properly
 OBJS+=virtio.o virtio-blk.o virtio-balloon.o virtio-net.o virtio-console.o
Index: qemu/vl.c
===================================================================
--- qemu.orig/vl.c
+++ qemu/vl.c
@@ -36,6 +36,7 @@
 #include "gdbstub.h"
 #include "qemu-timer.h"
 #include "qemu-char.h"
+#include "qemu-thread.h"
 #include "cache-utils.h"
 #include "block.h"
 #include "audio/audio.h"
@@ -263,6 +264,8 @@ static QEMUTimer *nographic_timer;
 
 uint8_t qemu_uuid[16];
 
+QemuSem qemu_sem;
+
 /***********************************************************/
 /* x86 ISA bus support */
 
@@ -3650,7 +3653,10 @@ void main_loop_wait(int timeout)
         slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
     }
 #endif
+
+    qemu_sem_unlock(&qemu_sem);
     ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
+    qemu_sem_lock(&qemu_sem);
     if (ret > 0) {
         IOHandlerRecord **pioh;
 
@@ -3708,6 +3714,9 @@ static int main_loop(void)
 #endif
     CPUState *env;
 
+    qemu_sem_init(&qemu_sem);
+    qemu_sem_lock(&qemu_sem);
+
     cur_cpu = first_cpu;
     next_cpu = cur_cpu->next_cpu ?: first_cpu;
     for(;;) {
Index: qemu/qemu-thread.c
===================================================================
--- /dev/null
+++ qemu/qemu-thread.c
@@ -0,0 +1,78 @@
+#include <errno.h>
+#include <time.h>
+#include <signal.h>
+#include "qemu-thread.h"
+
+int qemu_sem_init(QemuSem *sem)
+{
+    return sem_init(&sem->sema, 0, 1);
+}
+
+int qemu_sem_lock(QemuSem *sem)
+{
+    int r;
+
+    while ((r = sem_wait(&sem->sema)) == -1 && errno == EINTR)
+        continue;
+
+    return r;
+}
+
+int qemu_sem_trylock(QemuSem *sem)
+{
+    return sem_trywait(&sem->sema);
+}
+
+static void add_to_timespec(struct timespec *ts, unsigned int msecs)
+{
+    ts->tv_sec = ts->tv_sec + (long)(msecs / 1000);
+    ts->tv_nsec = (ts->tv_nsec + ((long)msecs % 1000) * 1000000);
+    if (ts->tv_nsec >= 1000000000) {
+        ts->tv_nsec -= 1000000000;
+        ts->tv_sec++;
+    }
+}
+
+int qemu_sem_timedlock(QemuSem *sem, unsigned int msecs)
+{
+    int r;
+    struct timespec ts;
+
+    clock_gettime(CLOCK_REALTIME, &ts);
+    add_to_timespec(&ts, msecs);
+
+    while ((r = sem_timedwait(&sem->sema, &ts)) == -1 && errno == EINTR)
+        continue;
+
+    return r;
+}
+
+int qemu_sem_unlock(QemuSem *sem)
+{
+    return sem_post(&sem->sema);
+}
+
+int qemu_thread_create(QemuThread *thread,
+                       void *(*start_routine)(void*),
+                       void *arg)
+{
+    return pthread_create(&thread->thread, NULL, start_routine, arg);
+}
+
+int qemu_thread_signal(QemuThread *thread, int sig)
+{
+    if (thread->thread != 0)
+        return pthread_kill(thread->thread, sig);
+    return -1; /* XXX: ESCHR */
+}
+
+void qemu_thread_self(QemuThread *thread)
+{
+    thread->thread = pthread_self();
+}
+
+int qemu_thread_equal(QemuThread *thread1, QemuThread *thread2)
+{
+   return (thread1->thread == thread2->thread);
+}
+
Index: qemu/qemu-thread.h
===================================================================
--- /dev/null
+++ qemu/qemu-thread.h
@@ -0,0 +1,27 @@
+#include "semaphore.h"
+#include "pthread.h"
+
+struct QemuSem {
+    sem_t sema;
+};
+
+struct QemuThread {
+    pthread_t thread;
+};
+
+typedef struct QemuSem QemuSem;
+typedef struct QemuThread QemuThread;
+
+int qemu_sem_init(QemuSem *sem);
+int qemu_sem_lock(QemuSem *sem);
+int qemu_sem_trylock(QemuSem *sem);
+int qemu_sem_timedlock(QemuSem *sem, unsigned int msecs);
+int qemu_sem_unlock(QemuSem *sem);
+
+int qemu_thread_create(QemuThread *thread,
+                       void *(*start_routine)(void*),
+                       void *arg);
+int qemu_thread_signal(QemuThread *thread, int sig);
+void qemu_thread_self(QemuThread *thread);
+int qemu_thread_equal(QemuThread *thread1, QemuThread *thread2);
+

-- 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [Qemu-devel] [patch 2/2] qemu: separate thread for io
  2009-03-11 16:16 [Qemu-devel] [patch 0/2] RFC: separate thread for IO Marcelo Tosatti
  2009-03-11 16:16 ` [Qemu-devel] [patch 1/2] qemu: sem/thread helpers Marcelo Tosatti
@ 2009-03-11 16:16 ` Marcelo Tosatti
  1 sibling, 0 replies; 12+ messages in thread
From: Marcelo Tosatti @ 2009-03-11 16:16 UTC (permalink / raw)
  To: qemu-devel; +Cc: Anthony Liguori

[-- Attachment #1: introduce-iothread --]
[-- Type: text/plain, Size: 6781 bytes --]


Index: qemu/exec.c
===================================================================
--- qemu.orig/exec.c
+++ qemu/exec.c
@@ -1513,6 +1513,20 @@ void cpu_interrupt(CPUState *env, int ma
     /* FIXME: This is probably not threadsafe.  A different thread could
        be in the middle of a read-modify-write operation.  */
     env->interrupt_request |= mask;
+
+    switch(mask) {
+    case CPU_INTERRUPT_HARD:
+    case CPU_INTERRUPT_SMI:
+    case CPU_INTERRUPT_NMI:
+    case CPU_INTERRUPT_EXIT:
+    /*
+     * only unlink the TB's if we're called from cpu thread context,
+     * otherwise signal cpu thread to do it.
+     */
+        if (qemu_notify_event(env))
+               return;
+    }
+
 #if defined(USE_NPTL)
     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
        problem and hope the cpu will stop of its own accord.  For userspace
Index: qemu/qemu-common.h
===================================================================
--- qemu.orig/qemu-common.h
+++ qemu/qemu-common.h
@@ -190,6 +190,8 @@ int cpu_load(QEMUFile *f, void *opaque, 
 
 /* Force QEMU to stop what it's doing and service IO */
 void qemu_service_io(void);
+void main_loop_break(void);
+int qemu_notify_event(void *env);
 
 typedef struct QEMUIOVector {
     struct iovec *iov;
Index: qemu/vl.c
===================================================================
--- qemu.orig/vl.c
+++ qemu/vl.c
@@ -265,6 +265,8 @@ static QEMUTimer *nographic_timer;
 uint8_t qemu_uuid[16];
 
 QemuSem qemu_sem;
+QemuThread qemu_io_thread;
+QemuThread cpus_thread;
 
 /***********************************************************/
 /* x86 ISA bus support */
@@ -1328,7 +1330,6 @@ static void host_alarm_handler(int host_
                                qemu_get_clock(vm_clock))) ||
         qemu_timer_expired(active_timers[QEMU_TIMER_REALTIME],
                            qemu_get_clock(rt_clock))) {
-        CPUState *env = next_cpu;
 
 #ifdef _WIN32
         struct qemu_alarm_win32 *data = ((struct qemu_alarm_timer*)dwUser)->priv;
@@ -1339,15 +1340,6 @@ static void host_alarm_handler(int host_
 #endif
         alarm_timer->flags |= ALARM_FLAG_EXPIRED;
 
-        if (env) {
-            /* stop the currently executing cpu because a timer occured */
-            cpu_interrupt(env, CPU_INTERRUPT_EXIT);
-#ifdef USE_KQEMU
-            if (env->kqemu_enabled) {
-                kqemu_cpu_interrupt(env);
-            }
-#endif
-        }
         event_pending = 1;
     }
 }
@@ -2878,6 +2870,7 @@ int qemu_set_fd_handler2(int fd,
         ioh->opaque = opaque;
         ioh->deleted = 0;
     }
+    main_loop_break();
     return 0;
 }
 
@@ -3334,6 +3327,7 @@ void qemu_bh_schedule(QEMUBH *bh)
     if (env) {
         cpu_interrupt(env, CPU_INTERRUPT_EXIT);
     }
+    main_loop_break();
 }
 
 void qemu_bh_cancel(QEMUBH *bh)
@@ -3611,6 +3605,138 @@ static void host_main_loop_wait(int *tim
 }
 #endif
 
+static int wait_signal(int timeout)
+{
+    struct timespec ts;
+    sigset_t waitset;
+
+    if (!timeout)
+        timeout = 1;
+
+    ts.tv_sec = timeout / 1000;
+    ts.tv_nsec = (timeout % 1000) * 1000000;
+    sigemptyset(&waitset);
+    sigaddset(&waitset, SIGUSR1);
+
+    return sigtimedwait(&waitset, NULL, &ts);
+}
+
+static int has_work(CPUState *env)
+{
+    int r = 0;
+    if (!env->halted)
+        r = 1;
+    if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI))
+        r = 1;
+    return r;
+}
+
+static void qemu_wait_io_event(CPUState *env, int timeout)
+{
+    qemu_sem_unlock(&qemu_sem);
+
+    if (!has_work(env))
+        wait_signal(timeout);
+    /*
+     * FIXME: sem_post only wakes up the waiting thread, there is no
+     * guarantee it has acquired the semaphore. Need synchronization, perhaps
+     * with pthread conditional, instead of usleep(1).
+     */
+    else
+        usleep(1);
+
+    qemu_sem_lock(&qemu_sem);
+}
+
+static void cpu_signal(int sig)
+{
+    QemuThread self;
+    CPUState *env = cpu_single_env;
+
+    event_pending = 1;
+
+    qemu_thread_self(&self);
+    if (!qemu_thread_equal(&self, &cpus_thread))
+        return;
+
+    if (env) {
+            /* stop the currently executing cpu because an event occurred */
+        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
+#ifdef USE_KQEMU
+        if (env->kqemu_enabled) {
+            kqemu_cpu_interrupt(env);
+        }
+#endif
+    }
+}
+
+static void block_io_signals(void)
+{
+    sigset_t set;
+    struct sigaction sigact;
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR2);
+    sigaddset(&set, SIGIO);
+    sigaddset(&set, SIGALRM);
+    pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR1);
+    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+
+    memset(&sigact, 0, sizeof(sigact));
+    sigact.sa_handler = cpu_signal;
+    sigaction(SIGUSR1, &sigact, NULL);
+}
+
+static void unblock_io_signals(void)
+{
+    sigset_t set;
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR2);
+    sigaddset(&set, SIGIO);
+    sigaddset(&set, SIGALRM);
+    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR1);
+    pthread_sigmask(SIG_BLOCK, &set, NULL);
+}
+
+int qemu_notify_event(void *cpu_env)
+{
+    QemuThread me;
+    qemu_thread_self(&me);
+
+    if (qemu_thread_equal(&cpus_thread, &me))
+        return 0;
+    qemu_thread_signal(&cpus_thread, SIGUSR1);
+    return 1;
+}
+
+void main_loop_break(void)
+{
+}
+
+static void *io_thread_fn(void *arg)
+{
+    unblock_io_signals();
+    qemu_sem_lock(&qemu_sem);
+    while (1)
+        main_loop_wait(1000);
+}
+
+static void qemu_signal_lock(QemuSem *sem, unsigned int msecs)
+{
+    while (qemu_sem_trylock(sem)) {
+        qemu_thread_signal(&cpus_thread, SIGUSR1);
+        if (!qemu_sem_timedlock(&qemu_sem, msecs))
+            break;
+    }
+}
+
 void main_loop_wait(int timeout)
 {
     IOHandlerRecord *ioh;
@@ -3656,7 +3782,7 @@ void main_loop_wait(int timeout)
 
     qemu_sem_unlock(&qemu_sem);
     ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
-    qemu_sem_lock(&qemu_sem);
+    qemu_signal_lock(&qemu_sem, 100);
     if (ret > 0) {
         IOHandlerRecord **pioh;
 
@@ -3717,6 +3843,11 @@ static int main_loop(void)
     qemu_sem_init(&qemu_sem);
     qemu_sem_lock(&qemu_sem);
 
+    qemu_thread_create(&qemu_io_thread, io_thread_fn, NULL);
+    block_io_signals();
+
+    qemu_thread_self(&cpus_thread);
+
     cur_cpu = first_cpu;
     next_cpu = cur_cpu->next_cpu ?: first_cpu;
     for(;;) {
@@ -3849,7 +3980,7 @@ static int main_loop(void)
 #ifdef CONFIG_PROFILER
         ti = profile_getclock();
 #endif
-        main_loop_wait(timeout);
+        qemu_wait_io_event(env, timeout);
 #ifdef CONFIG_PROFILER
         dev_time += profile_getclock() - ti;
 #endif

-- 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [Qemu-devel] Re: [patch 1/2] qemu: sem/thread helpers
  2009-03-11 16:16 ` [Qemu-devel] [patch 1/2] qemu: sem/thread helpers Marcelo Tosatti
@ 2009-03-11 16:33   ` Anthony Liguori
  2009-03-11 16:48   ` [Qemu-devel] " Paul Brook
  2009-03-15 14:15   ` Avi Kivity
  2 siblings, 0 replies; 12+ messages in thread
From: Anthony Liguori @ 2009-03-11 16:33 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: qemu-devel

Marcelo Tosatti wrote:
> Index: qemu/Makefile.target
> ===================================================================
> --- qemu.orig/Makefile.target
> +++ qemu/Makefile.target
> @@ -500,7 +500,7 @@ endif #CONFIG_BSD_USER
>  # System emulator target
>  ifndef CONFIG_USER_ONLY
>
> -OBJS=vl.o osdep.o monitor.o pci.o loader.o isa_mmio.o machine.o
> +OBJS=vl.o qemu-thread.o osdep.o monitor.o pci.o loader.o isa_mmio.o machine.o
>  # virtio has to be here due to weird dependency between PCI and virtio-net.
>  # need to fix this properly
>  OBJS+=virtio.o virtio-blk.o virtio-balloon.o virtio-net.o virtio-console.o
> Index: qemu/vl.c
> ===================================================================
> --- qemu.orig/vl.c
> +++ qemu/vl.c
> @@ -36,6 +36,7 @@
>  #include "gdbstub.h"
>  #include "qemu-timer.h"
>  #include "qemu-char.h"
> +#include "qemu-thread.h"
>  #include "cache-utils.h"
>  #include "block.h"
>  #include "audio/audio.h"
> @@ -263,6 +264,8 @@ static QEMUTimer *nographic_timer;
>
>  uint8_t qemu_uuid[16];
>
> +QemuSem qemu_sem;
> +
>  /***********************************************************/
>  /* x86 ISA bus support */
>
> @@ -3650,7 +3653,10 @@ void main_loop_wait(int timeout)
>          slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
>      }
>  #endif
> +
> +    qemu_sem_unlock(&qemu_sem);
>      ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
> +    qemu_sem_lock(&qemu_sem);
>   

I've looked at this a number of times, so I feel comfortable with this, 
but I think it's subtle enough that it deserves a comment.

main_loop_wait() *must* not assume any global state is consistent across 
select() invocations.  We should make that very clear as it's a detail 
that can be easily missed.

Regards,

Anthony Liguori

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [patch 1/2] qemu: sem/thread helpers
  2009-03-11 16:16 ` [Qemu-devel] [patch 1/2] qemu: sem/thread helpers Marcelo Tosatti
  2009-03-11 16:33   ` [Qemu-devel] " Anthony Liguori
@ 2009-03-11 16:48   ` Paul Brook
  2009-03-11 16:56     ` Anthony Liguori
                       ` (2 more replies)
  2009-03-15 14:15   ` Avi Kivity
  2 siblings, 3 replies; 12+ messages in thread
From: Paul Brook @ 2009-03-11 16:48 UTC (permalink / raw)
  To: qemu-devel; +Cc: Anthony Liguori, Marcelo Tosatti

> +int qemu_sem_init(QemuSem *sem);
> +int qemu_sem_lock(QemuSem *sem);

Please don't introduce yet another set of locking routines.

Also, your function names suggest you actually want a mutex, not a semaphore.

> +int qemu_sem_lock(QemuSem *sem)
> +{

This is bogus. You never check the return value.

> +QemuSem qemu_sem;

It's entirely unclear what is actually protected by the semaphore.

What exactly does the IO thread do? AFAICS device MMIO is still run from 
within the CPU thread. Device code is not threadsafe (and probably never will 
be), so you can't run any of the device callbacks in the IO thread either. 
Doesn't seem like there's a lot left for it to do...

Paul

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [patch 1/2] qemu: sem/thread helpers
  2009-03-11 16:48   ` [Qemu-devel] " Paul Brook
@ 2009-03-11 16:56     ` Anthony Liguori
  2009-03-11 16:58     ` Marcelo Tosatti
  2009-03-18 18:47     ` Marcelo Tosatti
  2 siblings, 0 replies; 12+ messages in thread
From: Anthony Liguori @ 2009-03-11 16:56 UTC (permalink / raw)
  To: Paul Brook; +Cc: Marcelo Tosatti, qemu-devel

Paul Brook wrote:
>> +QemuSem qemu_sem;
>>     
>
> It's entirely unclear what is actually protected by the semaphore.
>
> What exactly does the IO thread do? AFAICS device MMIO is still run from 
> within the CPU thread. Device code is not threadsafe (and probably never will 
> be), so you can't run any of the device callbacks in the IO thread either. 
> Doesn't seem like there's a lot left for it to do...
>   

The goal is to drop qemu_mutex while the VCPU is running to allow the 
device model to run while the VCPU executes.  This is not currently safe 
with TCG but it is safe when using KVM.

There are a couple reasons to drop qemu_mutex while running the VCPU.  
When using KVM's in-kernel APIC, hlt emulation occurs within the 
kernel.  This means that the KVM_RUN ioctl blocks indefinitely.  We 
currently don't use in-kernel APIC emulation in upstream QEMU's KVM 
support as this set of patches is a pre-requisite for that.

This also enables true SMP support in KVM.   You can allow multiple 
VCPUs to run concurrently once you're dropping qemu_mutex during VCPU 
execution.

The VCPU threads have to acquire qemu_mutex once they drop back to QEMU 
(to handle MMIO, for instance), but this only happens during IO.

 From an infrastructure perspective, this is a feature for KVM but also 
a step in a better direction for TCG too.  If TCG can be made to allow 
qemu_mutex to be dropped (perhaps for x86->x86 translation, as a start), 
then the same infrastructure can be used for true SMP support with TCG.

Once we have this, we can start making some of the device model code 
thread safe too...

Regards,

Anthony Liguori

Regards,

Anthony Liguori

> Paul
>   

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [patch 1/2] qemu: sem/thread helpers
  2009-03-11 16:48   ` [Qemu-devel] " Paul Brook
  2009-03-11 16:56     ` Anthony Liguori
@ 2009-03-11 16:58     ` Marcelo Tosatti
  2009-03-18 18:47     ` Marcelo Tosatti
  2 siblings, 0 replies; 12+ messages in thread
From: Marcelo Tosatti @ 2009-03-11 16:58 UTC (permalink / raw)
  To: Paul Brook; +Cc: Anthony Liguori, qemu-devel

On Wed, Mar 11, 2009 at 04:48:01PM +0000, Paul Brook wrote:
> > +QemuSem qemu_sem;
> 
> It's entirely unclear what is actually protected by the semaphore.

Everything at the moment.

> What exactly does the IO thread do? AFAICS device MMIO is still run from 
> within the CPU thread. Device code is not threadsafe (and probably never will 
> be), so you can't run any of the device callbacks in the IO thread either. 
> Doesn't seem like there's a lot left for it to do...

There are things which can run in parallel. VNC dirty update, read()'ing
of data into QEMU, for two examples.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [patch 1/2] qemu: sem/thread helpers
  2009-03-11 16:16 ` [Qemu-devel] [patch 1/2] qemu: sem/thread helpers Marcelo Tosatti
  2009-03-11 16:33   ` [Qemu-devel] " Anthony Liguori
  2009-03-11 16:48   ` [Qemu-devel] " Paul Brook
@ 2009-03-15 14:15   ` Avi Kivity
  2009-03-17 17:42     ` Marcelo Tosatti
  2 siblings, 1 reply; 12+ messages in thread
From: Avi Kivity @ 2009-03-15 14:15 UTC (permalink / raw)
  To: qemu-devel; +Cc: Anthony Liguori

Marcelo Tosatti wrote:
> +
> +struct QemuSem {
> +    sem_t sema;
> +};
>   

Why aren't you using pthread_mutex_t?


-- 
error compiling committee.c: too many arguments to function

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [patch 1/2] qemu: sem/thread helpers
  2009-03-15 14:15   ` Avi Kivity
@ 2009-03-17 17:42     ` Marcelo Tosatti
  2009-03-17 23:07       ` Paul Brook
  0 siblings, 1 reply; 12+ messages in thread
From: Marcelo Tosatti @ 2009-03-17 17:42 UTC (permalink / raw)
  To: qemu-devel; +Cc: Anthony Liguori

On Sun, Mar 15, 2009 at 04:15:52PM +0200, Avi Kivity wrote:
> Marcelo Tosatti wrote:
>> +
>> +struct QemuSem {
>> +    sem_t sema;
>> +};
>>   
>
> Why aren't you using pthread_mutex_t?

Because we want the thread waiting on the lock to be awakened
once the holder releases it. This does not happen with pthread
mutexes AFAIK.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [patch 1/2] qemu: sem/thread helpers
  2009-03-17 17:42     ` Marcelo Tosatti
@ 2009-03-17 23:07       ` Paul Brook
  2009-03-17 23:43         ` Marcelo Tosatti
  0 siblings, 1 reply; 12+ messages in thread
From: Paul Brook @ 2009-03-17 23:07 UTC (permalink / raw)
  To: qemu-devel; +Cc: Anthony Liguori, Marcelo Tosatti

On Tuesday 17 March 2009, Marcelo Tosatti wrote:
> On Sun, Mar 15, 2009 at 04:15:52PM +0200, Avi Kivity wrote:
> > Marcelo Tosatti wrote:
> >> +
> >> +struct QemuSem {
> >> +    sem_t sema;
> >> +};
> >
> > Why aren't you using pthread_mutex_t?
>
> Because we want the thread waiting on the lock to be awakened
> once the holder releases it. This does not happen with pthread
> mutexes AFAIK.

Really? I'd be surprised if there's any difference. The whole point of a mutex 
is that it blocks until the other thread releases it.  At best it sounds like 
you're making fairly sketchy assumptions about the host OS scheduler 
heuristics.

Paul

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [patch 1/2] qemu: sem/thread helpers
  2009-03-17 23:07       ` Paul Brook
@ 2009-03-17 23:43         ` Marcelo Tosatti
  0 siblings, 0 replies; 12+ messages in thread
From: Marcelo Tosatti @ 2009-03-17 23:43 UTC (permalink / raw)
  To: Paul Brook; +Cc: Anthony Liguori, qemu-devel

On Tue, Mar 17, 2009 at 11:07:07PM +0000, Paul Brook wrote:
> On Tuesday 17 March 2009, Marcelo Tosatti wrote:
> > On Sun, Mar 15, 2009 at 04:15:52PM +0200, Avi Kivity wrote:
> > > Marcelo Tosatti wrote:
> > >> +
> > >> +struct QemuSem {
> > >> +    sem_t sema;
> > >> +};
> > >
> > > Why aren't you using pthread_mutex_t?
> >
> > Because we want the thread waiting on the lock to be awakened
> > once the holder releases it. This does not happen with pthread
> > mutexes AFAIK.
> 
> Really? I'd be surprised if there's any difference. The whole point of a mutex 
> is that it blocks until the other thread releases it.  At best it sounds like 
> you're making fairly sketchy assumptions about the host OS scheduler 
> heuristics.

Actually scratch that. Of course mutex_unlock wakes up waiters. Will
send a better patch soon.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Qemu-devel] [patch 1/2] qemu: sem/thread helpers
  2009-03-11 16:48   ` [Qemu-devel] " Paul Brook
  2009-03-11 16:56     ` Anthony Liguori
  2009-03-11 16:58     ` Marcelo Tosatti
@ 2009-03-18 18:47     ` Marcelo Tosatti
  2 siblings, 0 replies; 12+ messages in thread
From: Marcelo Tosatti @ 2009-03-18 18:47 UTC (permalink / raw)
  To: qemu-devel, Paul Brook; +Cc: Anthony Liguori

On Wed, Mar 11, 2009 at 04:48:01PM +0000, Paul Brook wrote:
> > +int qemu_sem_init(QemuSem *sem);
> > +int qemu_sem_lock(QemuSem *sem);
> 
> Please don't introduce yet another set of locking routines.

I don't see any mutexes around? Or do you mean the wrappers around
pthread mutexes/threads are bad? I assume it might be useful for the
Windows port.

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2009-03-18 18:50 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2009-03-11 16:16 [Qemu-devel] [patch 0/2] RFC: separate thread for IO Marcelo Tosatti
2009-03-11 16:16 ` [Qemu-devel] [patch 1/2] qemu: sem/thread helpers Marcelo Tosatti
2009-03-11 16:33   ` [Qemu-devel] " Anthony Liguori
2009-03-11 16:48   ` [Qemu-devel] " Paul Brook
2009-03-11 16:56     ` Anthony Liguori
2009-03-11 16:58     ` Marcelo Tosatti
2009-03-18 18:47     ` Marcelo Tosatti
2009-03-15 14:15   ` Avi Kivity
2009-03-17 17:42     ` Marcelo Tosatti
2009-03-17 23:07       ` Paul Brook
2009-03-17 23:43         ` Marcelo Tosatti
2009-03-11 16:16 ` [Qemu-devel] [patch 2/2] qemu: separate thread for io Marcelo Tosatti

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).