From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: Anthony Liguori <aliguori@us.ibm.com>
Subject: [Qemu-devel] [PATCH 19/28] threadpool: move globals into struct ThreadPool
Date: Fri, 15 Mar 2013 16:14:16 +0100
Message-ID: <1363360465-5247-20-git-send-email-stefanha@redhat.com>
In-Reply-To: <1363360465-5247-1-git-send-email-stefanha@redhat.com>

Move global variables into a struct so multiple thread pools can be
supported in the future.

This patch does not change thread-pool.h interfaces.  There is still a
global thread pool and it is not yet possible to create/destroy
individual thread pools.  Moving the variables into a struct first makes
later patches easier to review.
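
For review, the conversion follows a standard pattern; the sketch below
is a condensed illustration distilled from the diff, not code added by
this patch:

    /* Before: state lives in file-scope globals, implicit in every
     * function that touches the pool.
     */
    static QemuMutex lock;
    static int cur_threads;
    static void do_spawn_thread(void);

    /* After: the same state lives in a struct that is passed around
     * explicitly.
     */
    typedef struct ThreadPool ThreadPool;
    struct ThreadPool {
        EventNotifier notifier;
        QemuMutex lock;
        int cur_threads;
        /* ... */
    };
    static void do_spawn_thread(ThreadPool *pool);

    /* Callbacks that only receive an embedded member recover the
     * enclosing pool with container_of():
     */
    static int thread_pool_active(EventNotifier *notifier)
    {
        ThreadPool *pool = container_of(notifier, ThreadPool, notifier);
        return !QLIST_EMPTY(&pool->head);
    }

AIO callbacks that only receive a BlockDriverAIOCB recover the pool the
same way, via the new ThreadPoolElement->pool back-pointer.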

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
---
 thread-pool.c | 190 +++++++++++++++++++++++++++++++++-------------------------
 trace-events  |   4 +-
 2 files changed, 112 insertions(+), 82 deletions(-)

diff --git a/thread-pool.c b/thread-pool.c
index e3ca64d..a0aecd0 100644
--- a/thread-pool.c
+++ b/thread-pool.c
@@ -24,7 +24,9 @@
 #include "qemu/event_notifier.h"
 #include "block/thread-pool.h"
 
-static void do_spawn_thread(void);
+typedef struct ThreadPool ThreadPool;
+
+static void do_spawn_thread(ThreadPool *pool);
 
 typedef struct ThreadPoolElement ThreadPoolElement;
 
@@ -37,6 +39,7 @@ enum ThreadState {
 
 struct ThreadPoolElement {
     BlockDriverAIOCB common;
+    ThreadPool *pool;
     ThreadPoolFunc *func;
     void *arg;
 
@@ -54,49 +57,56 @@ struct ThreadPoolElement {
     QLIST_ENTRY(ThreadPoolElement) all;
 };
 
-static EventNotifier notifier;
-static QemuMutex lock;
-static QemuCond check_cancel;
-static QemuSemaphore sem;
-static int max_threads = 64;
-static QEMUBH *new_thread_bh;
-
-/* The following variables are protected by the global mutex.  */
-static QLIST_HEAD(, ThreadPoolElement) head;
-
-/* The following variables are protected by lock.  */
-static QTAILQ_HEAD(, ThreadPoolElement) request_list;
-static int cur_threads;
-static int idle_threads;
-static int new_threads;     /* backlog of threads we need to create */
-static int pending_threads; /* threads created but not running yet */
-static int pending_cancellations; /* whether we need a cond_broadcast */
-
-static void *worker_thread(void *unused)
+struct ThreadPool {
+    EventNotifier notifier;
+    QemuMutex lock;
+    QemuCond check_cancel;
+    QemuSemaphore sem;
+    int max_threads;
+    QEMUBH *new_thread_bh;
+
+    /* The following variables are only accessed from one AioContext. */
+    QLIST_HEAD(, ThreadPoolElement) head;
+
+    /* The following variables are protected by lock.  */
+    QTAILQ_HEAD(, ThreadPoolElement) request_list;
+    int cur_threads;
+    int idle_threads;
+    int new_threads;     /* backlog of threads we need to create */
+    int pending_threads; /* threads created but not running yet */
+    int pending_cancellations; /* whether we need a cond_broadcast */
+};
+
+/* Currently there is only one thread pool instance. */
+static ThreadPool global_pool;
+
+static void *worker_thread(void *opaque)
 {
-    qemu_mutex_lock(&lock);
-    pending_threads--;
-    do_spawn_thread();
+    ThreadPool *pool = opaque;
+
+    qemu_mutex_lock(&pool->lock);
+    pool->pending_threads--;
+    do_spawn_thread(pool);
 
     while (1) {
         ThreadPoolElement *req;
         int ret;
 
         do {
-            idle_threads++;
-            qemu_mutex_unlock(&lock);
-            ret = qemu_sem_timedwait(&sem, 10000);
-            qemu_mutex_lock(&lock);
-            idle_threads--;
-        } while (ret == -1 && !QTAILQ_EMPTY(&request_list));
+            pool->idle_threads++;
+            qemu_mutex_unlock(&pool->lock);
+            ret = qemu_sem_timedwait(&pool->sem, 10000);
+            qemu_mutex_lock(&pool->lock);
+            pool->idle_threads--;
+        } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list));
         if (ret == -1) {
             break;
         }
 
-        req = QTAILQ_FIRST(&request_list);
-        QTAILQ_REMOVE(&request_list, req, reqs);
+        req = QTAILQ_FIRST(&pool->request_list);
+        QTAILQ_REMOVE(&pool->request_list, req, reqs);
         req->state = THREAD_ACTIVE;
-        qemu_mutex_unlock(&lock);
+        qemu_mutex_unlock(&pool->lock);
 
         ret = req->func(req->arg);
 
@@ -105,45 +115,47 @@ static void *worker_thread(void *unused)
         smp_wmb();
         req->state = THREAD_DONE;
 
-        qemu_mutex_lock(&lock);
-        if (pending_cancellations) {
-            qemu_cond_broadcast(&check_cancel);
+        qemu_mutex_lock(&pool->lock);
+        if (pool->pending_cancellations) {
+            qemu_cond_broadcast(&pool->check_cancel);
         }
 
-        event_notifier_set(&notifier);
+        event_notifier_set(&pool->notifier);
     }
 
-    cur_threads--;
-    qemu_mutex_unlock(&lock);
+    pool->cur_threads--;
+    qemu_mutex_unlock(&pool->lock);
     return NULL;
 }
 
-static void do_spawn_thread(void)
+static void do_spawn_thread(ThreadPool *pool)
 {
     QemuThread t;
 
     /* Runs with lock taken.  */
-    if (!new_threads) {
+    if (!pool->new_threads) {
         return;
     }
 
-    new_threads--;
-    pending_threads++;
+    pool->new_threads--;
+    pool->pending_threads++;
 
-    qemu_thread_create(&t, worker_thread, NULL, QEMU_THREAD_DETACHED);
+    qemu_thread_create(&t, worker_thread, pool, QEMU_THREAD_DETACHED);
 }
 
 static void spawn_thread_bh_fn(void *opaque)
 {
-    qemu_mutex_lock(&lock);
-    do_spawn_thread();
-    qemu_mutex_unlock(&lock);
+    ThreadPool *pool = opaque;
+
+    qemu_mutex_lock(&pool->lock);
+    do_spawn_thread(pool);
+    qemu_mutex_unlock(&pool->lock);
 }
 
-static void spawn_thread(void)
+static void spawn_thread(ThreadPool *pool)
 {
-    cur_threads++;
-    new_threads++;
+    pool->cur_threads++;
+    pool->new_threads++;
     /* If there are threads being created, they will spawn new workers, so
      * we don't spend time creating many threads in a loop holding a mutex or
      * starving the current vcpu.
@@ -151,23 +163,25 @@ static void spawn_thread(void)
      * If there are no idle threads, ask the main thread to create one, so we
      * inherit the correct affinity instead of the vcpu affinity.
      */
-    if (!pending_threads) {
-        qemu_bh_schedule(new_thread_bh);
+    if (!pool->pending_threads) {
+        qemu_bh_schedule(pool->new_thread_bh);
     }
 }
 
 static void event_notifier_ready(EventNotifier *notifier)
 {
+    ThreadPool *pool = container_of(notifier, ThreadPool, notifier);
     ThreadPoolElement *elem, *next;
 
     event_notifier_test_and_clear(notifier);
 restart:
-    QLIST_FOREACH_SAFE(elem, &head, all, next) {
+    QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
         if (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) {
             continue;
         }
         if (elem->state == THREAD_DONE) {
-            trace_thread_pool_complete(elem, elem->common.opaque, elem->ret);
+            trace_thread_pool_complete(pool, elem, elem->common.opaque,
+                                       elem->ret);
         }
         if (elem->state == THREAD_DONE && elem->common.cb) {
             QLIST_REMOVE(elem, all);
@@ -186,34 +200,36 @@ restart:
 
 static int thread_pool_active(EventNotifier *notifier)
 {
-    return !QLIST_EMPTY(&head);
+    ThreadPool *pool = container_of(notifier, ThreadPool, notifier);
+    return !QLIST_EMPTY(&pool->head);
 }
 
 static void thread_pool_cancel(BlockDriverAIOCB *acb)
 {
     ThreadPoolElement *elem = (ThreadPoolElement *)acb;
+    ThreadPool *pool = elem->pool;
 
     trace_thread_pool_cancel(elem, elem->common.opaque);
 
-    qemu_mutex_lock(&lock);
+    qemu_mutex_lock(&pool->lock);
     if (elem->state == THREAD_QUEUED &&
         /* No thread has yet started working on elem. we can try to "steal"
          * the item from the worker if we can get a signal from the
          * semaphore.  Because this is non-blocking, we can do it with
          * the lock taken and ensure that elem will remain THREAD_QUEUED.
          */
-        qemu_sem_timedwait(&sem, 0) == 0) {
-        QTAILQ_REMOVE(&request_list, elem, reqs);
+        qemu_sem_timedwait(&pool->sem, 0) == 0) {
+        QTAILQ_REMOVE(&pool->request_list, elem, reqs);
         elem->state = THREAD_CANCELED;
-        event_notifier_set(&notifier);
+        event_notifier_set(&pool->notifier);
     } else {
-        pending_cancellations++;
+        pool->pending_cancellations++;
         while (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) {
-            qemu_cond_wait(&check_cancel, &lock);
+            qemu_cond_wait(&pool->check_cancel, &pool->lock);
         }
-        pending_cancellations--;
+        pool->pending_cancellations--;
     }
-    qemu_mutex_unlock(&lock);
+    qemu_mutex_unlock(&pool->lock);
 }
 
 static const AIOCBInfo thread_pool_aiocb_info = {
@@ -224,24 +240,26 @@ static const AIOCBInfo thread_pool_aiocb_info = {
 BlockDriverAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
         BlockDriverCompletionFunc *cb, void *opaque)
 {
+    ThreadPool *pool = &global_pool;
     ThreadPoolElement *req;
 
     req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque);
     req->func = func;
     req->arg = arg;
     req->state = THREAD_QUEUED;
+    req->pool = pool;
 
-    QLIST_INSERT_HEAD(&head, req, all);
+    QLIST_INSERT_HEAD(&pool->head, req, all);
 
-    trace_thread_pool_submit(req, arg);
+    trace_thread_pool_submit(pool, req, arg);
 
-    qemu_mutex_lock(&lock);
-    if (idle_threads == 0 && cur_threads < max_threads) {
-        spawn_thread();
+    qemu_mutex_lock(&pool->lock);
+    if (pool->idle_threads == 0 && pool->cur_threads < pool->max_threads) {
+        spawn_thread(pool);
     }
-    QTAILQ_INSERT_TAIL(&request_list, req, reqs);
-    qemu_mutex_unlock(&lock);
-    qemu_sem_post(&sem);
+    QTAILQ_INSERT_TAIL(&pool->request_list, req, reqs);
+    qemu_mutex_unlock(&pool->lock);
+    qemu_sem_post(&pool->sem);
     return &req->common;
 }
 
@@ -272,18 +290,30 @@ void thread_pool_submit(ThreadPoolFunc *func, void *arg)
     thread_pool_submit_aio(func, arg, NULL, NULL);
 }
 
+static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
+{
+    if (!ctx) {
+        ctx = qemu_get_aio_context();
+    }
+
+    memset(pool, 0, sizeof(*pool));
+    event_notifier_init(&pool->notifier, false);
+    qemu_mutex_init(&pool->lock);
+    qemu_cond_init(&pool->check_cancel);
+    qemu_sem_init(&pool->sem, 0);
+    pool->max_threads = 64;
+    pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool);
+
+    QLIST_INIT(&pool->head);
+    QTAILQ_INIT(&pool->request_list);
+
+    aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready,
+                           thread_pool_active);
+}
+
 static void thread_pool_init(void)
 {
-    QLIST_INIT(&head);
-    event_notifier_init(&notifier, false);
-    qemu_mutex_init(&lock);
-    qemu_cond_init(&check_cancel);
-    qemu_sem_init(&sem, 0);
-    qemu_aio_set_event_notifier(&notifier, event_notifier_ready,
-                                thread_pool_active);
-
-    QTAILQ_INIT(&request_list);
-    new_thread_bh = qemu_bh_new(spawn_thread_bh_fn, NULL);
+    thread_pool_init_one(&global_pool, NULL);
 }
 
 block_init(thread_pool_init)
diff --git a/trace-events b/trace-events
index d6a847d..cd73b7f 100644
--- a/trace-events
+++ b/trace-events
@@ -115,8 +115,8 @@ virtio_blk_data_plane_complete_request(void *s, unsigned int head, int ret) "dat
 vring_setup(uint64_t physical, void *desc, void *avail, void *used) "vring physical %#"PRIx64" desc %p avail %p used %p"
 
 # thread-pool.c
-thread_pool_submit(void *req, void *opaque) "req %p opaque %p"
-thread_pool_complete(void *req, void *opaque, int ret) "req %p opaque %p ret %d"
+thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
+thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
 thread_pool_cancel(void *req, void *opaque) "req %p opaque %p"
 
 # posix-aio-compat.c
-- 
1.8.1.4
