qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Marcelo Tosatti <mtosatti@redhat.com>
To: qemu-devel@nongnu.org
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Subject: [Qemu-devel] [patch 02/10] qemu: mutex/thread/cond wrappers
Date: Wed, 25 Mar 2009 19:47:16 -0300	[thread overview]
Message-ID: <20090325225438.835198466@amt.cnet> (raw)
In-Reply-To: 20090325224714.853788328@amt.cnet

[-- Attachment #1: iothread-mutex --]
[-- Type: text/plain, Size: 4790 bytes --]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Index: trunk/qemu-thread.c
===================================================================
--- /dev/null
+++ trunk/qemu-thread.c
@@ -0,0 +1,123 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <time.h>
+#include <signal.h>
+#include <stdint.h>
+#include "qemu-thread.h"
+
+static void error_exit(const char *msg)
+{
+    fprintf(stderr, "qemu: %s failed\n", msg); /* pthread_* report errors via return value, not errno, so perror() would print a stale/unrelated message */
+    exit(1);
+}
+
+void qemu_mutex_init(QemuMutex *mutex) /* initialize with default (non-recursive) attributes; any failure is fatal */
+{
+    if (pthread_mutex_init(&mutex->lock, NULL))
+        error_exit(__func__);
+}
+
+void qemu_mutex_lock(QemuMutex *mutex) /* blocking lock; any pthread error (e.g. EDEADLK) is fatal */
+{
+    if (pthread_mutex_lock(&mutex->lock))
+        error_exit(__func__);
+}
+
+int qemu_mutex_trylock(QemuMutex *mutex) /* non-blocking: returns 0 on success, EBUSY (or another pthread error code) otherwise */
+{
+    return pthread_mutex_trylock(&mutex->lock);
+}
+
+static void timespec_add_ms(struct timespec *ts, uint64_t msecs)
+{
+    /* Advance *ts by msecs milliseconds, normalizing tv_nsec into [0, 1e9).
+     * One carry step suffices: tv_nsec < 1e9 on entry and the added part
+     * is < 1e9, so the sum is < 2e9. */
+    ts->tv_sec = ts->tv_sec + (long)(msecs / 1000);
+    ts->tv_nsec = ts->tv_nsec + (long)(msecs % 1000) * 1000000; /* mod before cast: (long)msecs is implementation-defined for large msecs */
+    if (ts->tv_nsec >= 1000000000) {
+        ts->tv_nsec -= 1000000000;
+        ts->tv_sec++;
+    }
+}
+
+int qemu_mutex_timedlock(QemuMutex *mutex, uint64_t msecs) /* returns 0 on success or ETIMEDOUT; any other error is fatal */
+{
+    int r;
+    struct timespec ts;
+
+    clock_gettime(CLOCK_REALTIME, &ts); /* pthread_mutex_timedlock deadlines are measured against CLOCK_REALTIME */
+    timespec_add_ms(&ts, msecs); /* deadline is absolute: now + msecs */
+
+    r = pthread_mutex_timedlock(&mutex->lock, &ts);
+    if (r && r != ETIMEDOUT)
+        error_exit(__func__);
+    return r;
+}
+
+void qemu_mutex_unlock(QemuMutex *mutex) /* caller must hold the lock; unlock errors (e.g. EPERM) are fatal */
+{
+    if (pthread_mutex_unlock(&mutex->lock))
+        error_exit(__func__);
+}
+
+void qemu_cond_init(QemuCond *cond) /* initialize with default attributes; any failure is fatal */
+{
+    if (pthread_cond_init(&cond->cond, NULL))
+        error_exit(__func__);
+}
+
+void qemu_cond_signal(QemuCond *cond) /* wake at least one waiter, if any; failure is fatal */
+{
+    if (pthread_cond_signal(&cond->cond))
+        error_exit(__func__);
+}
+
+void qemu_cond_broadcast(QemuCond *cond) /* wake all current waiters; failure is fatal */
+{
+    if (pthread_cond_broadcast(&cond->cond))
+        error_exit(__func__);
+}
+
+void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) /* caller must hold mutex; it is atomically released while waiting and re-acquired before return */
+{
+    if (pthread_cond_wait(&cond->cond, &mutex->lock))
+        error_exit(__func__);
+}
+
+int qemu_cond_timedwait(QemuCond *cond, QemuMutex *mutex, uint64_t msecs) /* returns 0 or ETIMEDOUT; caller must hold mutex; spurious wakeups still return 0 */
+{
+    struct timespec ts;
+    int r;
+
+    clock_gettime(CLOCK_REALTIME, &ts); /* pthread_cond_timedwait deadlines are measured against CLOCK_REALTIME */
+    timespec_add_ms(&ts, msecs); /* deadline is absolute: now + msecs */
+
+    r = pthread_cond_timedwait(&cond->cond, &mutex->lock, &ts);
+    if (r && r != ETIMEDOUT)
+        error_exit(__func__);
+    return r;
+}
+
+void qemu_thread_create(QemuThread *thread, /* spawn a thread with default attributes; failure is fatal */
+                       void *(*start_routine)(void*),
+                       void *arg)
+{
+    if (pthread_create(&thread->thread, NULL, start_routine, arg))
+        error_exit(__func__); /* NOTE(review): thread is joinable but no join wrapper exists — presumably threads run for the process lifetime; confirm */
+}
+
+void qemu_thread_signal(QemuThread *thread, int sig) /* deliver signal sig to the given thread via pthread_kill; failure is fatal */
+{
+    if (pthread_kill(thread->thread, sig))
+        error_exit(__func__);
+}
+
+void qemu_thread_self(QemuThread *thread) /* store the calling thread's handle into *thread */
+{
+    thread->thread = pthread_self();
+}
+
+int qemu_thread_equal(QemuThread *thread1, QemuThread *thread2) /* nonzero iff both handles name the same thread */
+{
+   return pthread_equal(thread1->thread, thread2->thread); /* pthread_t is opaque (may be a struct); == is non-portable */
+}
+
Index: trunk/qemu-thread.h
===================================================================
--- /dev/null
+++ trunk/qemu-thread.h
@@ -0,0 +1,40 @@
+#ifndef QEMU_THREAD_H
+#define QEMU_THREAD_H 1 /* double-underscore guard names are reserved identifiers (C99 7.1.3) */
+#include <semaphore.h> /* angle brackets: system headers, not project files */
+#include <pthread.h>
+
+struct QemuMutex { /* opaque wrappers so callers need not depend on pthreads types directly */
+    pthread_mutex_t lock;
+};
+
+struct QemuCond {
+    pthread_cond_t cond;
+};
+
+struct QemuThread {
+    pthread_t thread;
+};
+
+typedef struct QemuMutex QemuMutex;
+typedef struct QemuCond QemuCond;
+typedef struct QemuThread QemuThread;
+
+void qemu_mutex_init(QemuMutex *mutex);
+void qemu_mutex_lock(QemuMutex *mutex);
+int qemu_mutex_trylock(QemuMutex *mutex); /* 0 on success, EBUSY if already locked */
+int qemu_mutex_timedlock(QemuMutex *mutex, uint64_t msecs); /* 0 on success, ETIMEDOUT after msecs */
+void qemu_mutex_unlock(QemuMutex *mutex);
+
+void qemu_cond_init(QemuCond *cond);
+void qemu_cond_signal(QemuCond *cond);
+void qemu_cond_broadcast(QemuCond *cond);
+void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex); /* mutex must be held by caller */
+int qemu_cond_timedwait(QemuCond *cond, QemuMutex *mutex, uint64_t msecs); /* 0 on wakeup, ETIMEDOUT after msecs */
+
+void qemu_thread_create(QemuThread *thread,
+                       void *(*start_routine)(void*),
+                       void *arg);
+void qemu_thread_signal(QemuThread *thread, int sig);
+void qemu_thread_self(QemuThread *thread);
+int qemu_thread_equal(QemuThread *thread1, QemuThread *thread2); /* nonzero iff same thread */
+#endif
Index: trunk/Makefile.target
===================================================================
--- trunk.orig/Makefile.target
+++ trunk/Makefile.target
@@ -501,6 +501,7 @@ endif #CONFIG_BSD_USER
 ifndef CONFIG_USER_ONLY
 
 OBJS=vl.o osdep.o monitor.o pci.o loader.o isa_mmio.o machine.o dma-helpers.o
+OBJS+=qemu-thread.o
 # virtio has to be here due to weird dependency between PCI and virtio-net.
 # need to fix this properly
 OBJS+=virtio.o virtio-blk.o virtio-balloon.o virtio-net.o virtio-console.o

  parent reply	other threads:[~2009-03-25 22:56 UTC|newest]

Thread overview: 31+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2009-03-25 22:47 [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 01/10] qemu: create helper for event notification Marcelo Tosatti
2009-03-25 23:18   ` Glauber Costa
2009-03-25 23:27     ` Marcelo Tosatti
2009-03-25 22:47 ` Marcelo Tosatti [this message]
2009-03-25 23:24   ` [Qemu-devel] [patch 02/10] qemu: mutex/thread/cond wrappers Glauber Costa
2009-03-25 23:29     ` Marcelo Tosatti
2009-03-26 11:01   ` malc
2009-03-25 22:47 ` [Qemu-devel] [patch 03/10] qemu: per-arch cpu_has_work Marcelo Tosatti
2009-03-25 23:26   ` Glauber Costa
2009-03-28 18:14   ` Blue Swirl
2009-03-29 20:13     ` Blue Swirl
2009-04-02 23:42       ` Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 04/10] qemu: introduce main_loop_break Marcelo Tosatti
2009-03-26  1:24   ` Glauber Costa
2009-03-26 12:27   ` Jamie Lokier
2009-04-02 23:36     ` Marcelo Tosatti
2009-04-02 23:52       ` Anthony Liguori
2009-04-03 14:08         ` Markus Armbruster
2009-04-07  3:23         ` Jamie Lokier
2009-03-25 22:47 ` [Qemu-devel] [patch 05/10] qemu: separate thread for io Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 06/10] qemu: per-cpu thread information Marcelo Tosatti
2009-03-26  1:35   ` Glauber Costa
2009-03-26  2:10     ` Marcelo Tosatti
2009-03-26  2:26       ` Glauber Costa
2009-03-26  2:41         ` Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 07/10] qemu: handle reset/poweroff/shutdown in iothread Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 08/10] qemu: pause and resume cpu threads Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 09/10] qemu: handle vmstop from cpu context Marcelo Tosatti
2009-03-25 22:47 ` [Qemu-devel] [patch 10/10] qemu: basic kvm iothread support Marcelo Tosatti
2009-03-29 20:16 ` [Qemu-devel] [patch 00/10] iothread (candidate for inclusion) Blue Swirl

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20090325225438.835198466@amt.cnet \
    --to=mtosatti@redhat.com \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).