From: mtosatti@redhat.com
To: qemu-devel@nongnu.org
Subject: [Qemu-devel] [patch 1/7] qemu: mutex/thread/cond wrappers
Date: Thu, 19 Mar 2009 11:57:06 -0300
Message-ID: <20090319150537.801001000@localhost.localdomain>
In-Reply-To: <20090319145705.988576615@localhost.localdomain>

[-- Attachment #1: iothread-mutex --]
[-- Type: text/plain, Size: 5444 bytes --]


Index: qemu/Makefile.target
===================================================================
--- qemu.orig/Makefile.target
+++ qemu/Makefile.target
@@ -500,7 +500,7 @@ endif #CONFIG_BSD_USER
 # System emulator target
 ifndef CONFIG_USER_ONLY
 
-OBJS=vl.o osdep.o monitor.o pci.o loader.o isa_mmio.o machine.o
+OBJS=vl.o qemu-thread.o osdep.o monitor.o pci.o loader.o isa_mmio.o machine.o
 # virtio has to be here due to weird dependency between PCI and virtio-net.
 # need to fix this properly
 OBJS+=virtio.o virtio-blk.o virtio-balloon.o virtio-net.o virtio-console.o
Index: qemu/vl.c
===================================================================
--- qemu.orig/vl.c
+++ qemu/vl.c
@@ -36,6 +36,7 @@
 #include "gdbstub.h"
 #include "qemu-timer.h"
 #include "qemu-char.h"
+#include "qemu-thread.h"
 #include "cache-utils.h"
 #include "block.h"
 #include "audio/audio.h"
@@ -263,6 +264,8 @@ static QEMUTimer *nographic_timer;
 
 uint8_t qemu_uuid[16];
 
+QemuMutex qemu_global_mutex;
+
 /***********************************************************/
 /* x86 ISA bus support */
 
@@ -3650,7 +3653,14 @@ void main_loop_wait(int timeout)
         slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
     }
 #endif
+
+    /*
+     * The global mutex is dropped across select(), so main_loop_wait()
+     * must not assume any global state stays consistent across this call.
+     */
+    qemu_mutex_unlock(&qemu_global_mutex);
     ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
+    qemu_mutex_lock(&qemu_global_mutex);
     if (ret > 0) {
         IOHandlerRecord **pioh;
 
@@ -3708,6 +3718,9 @@ static int main_loop(void)
 #endif
     CPUState *env;
 
+    qemu_mutex_init(&qemu_global_mutex);
+    qemu_mutex_lock(&qemu_global_mutex);
+
     cur_cpu = first_cpu;
     next_cpu = cur_cpu->next_cpu ?: first_cpu;
     for(;;) {
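
The rule stated in the comment above generalizes beyond select(): with
qemu_global_mutex protecting all global state, any code that can block
must drop the lock before sleeping and retake it afterwards. A minimal
sketch of that discipline follows; this is hypothetical illustration
code, not part of the patch:

#include <sys/select.h>

#include "qemu-thread.h"

extern QemuMutex qemu_global_mutex;

/* Hypothetical helper: block on a file descriptor without holding
 * the global lock. */
static void blocking_wait_example(int fd)
{
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(fd, &rfds);

    qemu_mutex_unlock(&qemu_global_mutex); /* never sleep with the lock held */
    select(fd + 1, &rfds, NULL, NULL, NULL);
    qemu_mutex_lock(&qemu_global_mutex);   /* global state may have changed */
}
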
Index: qemu/qemu-thread.c
===================================================================
--- /dev/null
+++ qemu/qemu-thread.c
@@ -0,0 +1,99 @@
+#include <errno.h>
+#include <time.h>
+#include <signal.h>
+#include "qemu-thread.h"
+
+int qemu_mutex_init(QemuMutex *mutex)
+{
+    return pthread_mutex_init(&mutex->lock, NULL);
+}
+
+void qemu_mutex_lock(QemuMutex *mutex)
+{
+    pthread_mutex_lock(&mutex->lock);
+}
+
+int qemu_mutex_trylock(QemuMutex *mutex)
+{
+    return pthread_mutex_trylock(&mutex->lock);
+}
+
+static void add_to_timespec(struct timespec *ts, unsigned int msecs)
+{
+    ts->tv_sec = ts->tv_sec + (long)(msecs / 1000);
+    ts->tv_nsec = (ts->tv_nsec + ((long)msecs % 1000) * 1000000);
+    if (ts->tv_nsec >= 1000000000) {
+        ts->tv_nsec -= 1000000000;
+        ts->tv_sec++;
+    }
+}
+
+int qemu_mutex_timedlock(QemuMutex *mutex, unsigned int msecs)
+{
+    struct timespec ts;
+
+    clock_gettime(CLOCK_REALTIME, &ts);
+    add_to_timespec(&ts, msecs);
+
+    return pthread_mutex_timedlock(&mutex->lock, &ts);
+}
+
+void qemu_mutex_unlock(QemuMutex *mutex)
+{
+    pthread_mutex_unlock(&mutex->lock);
+}
+
+void qemu_cond_init(QemuCond *cond)
+{
+    pthread_cond_init(&cond->cond, NULL);
+}
+
+void qemu_cond_signal(QemuCond *cond)
+{
+    pthread_cond_signal(&cond->cond);
+}
+
+void qemu_cond_broadcast(QemuCond *cond)
+{
+    pthread_cond_broadcast(&cond->cond);
+}
+
+int qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
+{
+    return pthread_cond_wait(&cond->cond, &mutex->lock);
+}
+
+int qemu_cond_timedwait(QemuCond *cond, QemuMutex *mutex, unsigned int msecs)
+{
+    struct timespec ts;
+
+    clock_gettime(CLOCK_REALTIME, &ts);
+    add_to_timespec(&ts, msecs);
+
+    return pthread_cond_timedwait(&cond->cond, &mutex->lock, &ts);
+}
+
+int qemu_thread_create(QemuThread *thread,
+                       void *(*start_routine)(void*),
+                       void *arg)
+{
+    return pthread_create(&thread->thread, NULL, start_routine, arg);
+}
+
+int qemu_thread_signal(QemuThread *thread, int sig)
+{
+    if (thread->thread != 0)
+        return pthread_kill(thread->thread, sig);
+    return -1; /* XXX: ESRCH */
+}
+
+void qemu_thread_self(QemuThread *thread)
+{
+    thread->thread = pthread_self();
+}
+
+int qemu_thread_equal(QemuThread *thread1, QemuThread *thread2)
+{
+    return pthread_equal(thread1->thread, thread2->thread);
+}
+
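
A note on the timed variants above: the deadline handed to pthreads is
absolute, CLOCK_REALTIME plus msecs, so pthread_mutex_timedlock() and
pthread_cond_timedwait() return ETIMEDOUT once that wall-clock instant
passes. For example, msecs = 1500 advances tv_sec by 1 and tv_nsec by
500000000, normalizing if tv_nsec overflows a second. A hypothetical
caller, purely for illustration:

#include <errno.h>
#include <stdio.h>

#include "qemu-thread.h"

/* Hypothetical caller: give up on a contended mutex after 100 ms. */
static int lock_with_timeout_example(QemuMutex *mutex)
{
    int err = qemu_mutex_timedlock(mutex, 100);

    if (err == ETIMEDOUT) {
        fprintf(stderr, "mutex still contended after 100 ms\n");
    }
    return err; /* 0 on success, an errno value otherwise */
}
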
Index: qemu/qemu-thread.h
===================================================================
--- /dev/null
+++ qemu/qemu-thread.h
@@ -0,0 +1,38 @@
+#include "semaphore.h"
+#include "pthread.h"
+
+struct QemuMutex {
+    pthread_mutex_t lock;
+};
+
+struct QemuCond {
+    pthread_cond_t cond;
+};
+
+struct QemuThread {
+    pthread_t thread;
+};
+
+typedef struct QemuMutex QemuMutex;
+typedef struct QemuCond QemuCond;
+typedef struct QemuThread QemuThread;
+
+int qemu_mutex_init(QemuMutex *mutex);
+void qemu_mutex_lock(QemuMutex *mutex);
+int qemu_mutex_trylock(QemuMutex *mutex);
+int qemu_mutex_timedlock(QemuMutex *mutex, unsigned int msecs);
+void qemu_mutex_unlock(QemuMutex *mutex);
+
+void qemu_cond_init(QemuCond *cond);
+void qemu_cond_signal(QemuCond *cond);
+void qemu_cond_broadcast(QemuCond *cond);
+int qemu_cond_wait(QemuCond *cond, QemuMutex *mutex);
+int qemu_cond_timedwait(QemuCond *cond, QemuMutex *mutex, unsigned int msecs);
+
+int qemu_thread_create(QemuThread *thread,
+                       void *(*start_routine)(void*),
+                       void *arg);
+int qemu_thread_signal(QemuThread *thread, int sig);
+void qemu_thread_self(QemuThread *thread);
+int qemu_thread_equal(QemuThread *thread1, QemuThread *thread2);
+
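
To show how the pieces fit together, here is a sketch of spawning a
thread and waiting for it to signal readiness under a mutex/condvar
pair. The names (worker_fn, the ready flag) are illustrative only; the
real users of this API appear in later patches of the series:

#include <stddef.h>

#include "qemu-thread.h"

static QemuMutex lock;
static QemuCond ready_cond;
static int ready;

/* Illustrative worker: announce readiness under the lock. */
static void *worker_fn(void *arg)
{
    qemu_mutex_lock(&lock);
    ready = 1;
    qemu_cond_signal(&ready_cond);
    qemu_mutex_unlock(&lock);
    return NULL;
}

static void spawn_and_wait(void)
{
    QemuThread thread;

    qemu_mutex_init(&lock);
    qemu_cond_init(&ready_cond);
    qemu_thread_create(&thread, worker_fn, NULL);

    /* Recheck the predicate in a loop to cope with spurious wakeups. */
    qemu_mutex_lock(&lock);
    while (!ready) {
        qemu_cond_wait(&ready_cond, &lock);
    }
    qemu_mutex_unlock(&lock);
}
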

-- 
