From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Stefan Hajnoczi <stefanha@redhat.com>
Subject: [Qemu-devel] [PATCH 2/5] threadpool: add thread_pool_new() and thread_pool_free()
Date: Wed,  6 Mar 2013 16:45:32 +0100
Message-ID: <1362584735-30911-3-git-send-email-stefanha@redhat.com>
In-Reply-To: <1362584735-30911-1-git-send-email-stefanha@redhat.com>

ThreadPool is tied to an AioContext through its event notifier, which
determines the AioContext in which a work item's callback function is
invoked.

In order to support multiple AioContexts we need to support multiple
ThreadPool instances.

This patch adds the new/free functions.  The free function deserves
special attention because it quiesces remaining worker threads.  This
requires a new condition variable and a "stopping" flag to let workers
know they should terminate once idle.

We never needed to do this before since the global thread pool was not
explicitly destroyed until process termination.

Also stash the AioContext pointer in ThreadPool so that we can call
aio_set_event_notifier() in thread_pool_free().  We didn't need to hold
onto AioContext previously since there was no free function.
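
For illustration only (not part of this patch), a caller could pair the
new functions roughly as sketched below.  aio_context_new() and
aio_context_unref() are assumed to be available in this tree, and work
submission still goes through the global pool until later in the series:

    #include "block/aio.h"
    #include "block/thread-pool.h"

    static void example(void)
    {
        AioContext *ctx = aio_context_new();
        ThreadPool *pool = thread_pool_new(ctx);

        /* ... submit work and run the ctx event loop ... */

        /* Waits for idle workers to exit (stopping flag plus the
         * worker_stopped condvar) and unregisters the pool's event
         * notifier before freeing the pool.
         */
        thread_pool_free(pool);
        aio_context_unref(ctx);
    }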

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/thread-pool.h |  5 ++++
 thread-pool.c               | 56 +++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 57 insertions(+), 4 deletions(-)

diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
index 200703e..e1453c6 100644
--- a/include/block/thread-pool.h
+++ b/include/block/thread-pool.h
@@ -26,6 +26,11 @@
 
 typedef int ThreadPoolFunc(void *opaque);
 
+typedef struct ThreadPool ThreadPool;
+
+ThreadPool *thread_pool_new(struct AioContext *ctx);
+void thread_pool_free(ThreadPool *pool);
+
 BlockDriverAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
      BlockDriverCompletionFunc *cb, void *opaque);
 int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg);
diff --git a/thread-pool.c b/thread-pool.c
index 8a957b9..7a07408 100644
--- a/thread-pool.c
+++ b/thread-pool.c
@@ -24,8 +24,6 @@
 #include "qemu/event_notifier.h"
 #include "block/thread-pool.h"
 
-typedef struct ThreadPool ThreadPool;
-
 static void do_spawn_thread(ThreadPool *pool);
 
 typedef struct ThreadPoolElement ThreadPoolElement;
@@ -59,8 +57,10 @@ struct ThreadPoolElement {
 
 struct ThreadPool {
     EventNotifier notifier;
+    AioContext *ctx;
     QemuMutex lock;
     QemuCond check_cancel;
+    QemuCond worker_stopped;
     QemuSemaphore sem;
     int max_threads;
     QEMUBH *new_thread_bh;
@@ -75,6 +75,7 @@ struct ThreadPool {
     int new_threads;     /* backlog of threads we need to create */
     int pending_threads; /* threads created but not running yet */
     int pending_cancellations; /* whether we need a cond_broadcast */
+    bool stopping;
 };
 
 /* Currently there is only one thread pool instance. */
@@ -88,7 +89,7 @@ static void *worker_thread(void *opaque)
     pool->pending_threads--;
     do_spawn_thread(pool);
 
-    while (1) {
+    while (!pool->stopping) {
         ThreadPoolElement *req;
         int ret;
 
@@ -99,7 +100,7 @@ static void *worker_thread(void *opaque)
             qemu_mutex_lock(&pool->lock);
             pool->idle_threads--;
         } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list));
-        if (ret == -1) {
+        if (ret == -1 || pool->stopping) {
             break;
         }
 
@@ -124,6 +125,7 @@ static void *worker_thread(void *opaque)
     }
 
     pool->cur_threads--;
+    qemu_cond_signal(&pool->worker_stopped);
     qemu_mutex_unlock(&pool->lock);
     return NULL;
 }
@@ -294,8 +296,10 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
 {
     memset(pool, 0, sizeof(*pool));
     event_notifier_init(&pool->notifier, false);
+    pool->ctx = ctx;
     qemu_mutex_init(&pool->lock);
     qemu_cond_init(&pool->check_cancel);
+    qemu_cond_init(&pool->worker_stopped);
     qemu_sem_init(&pool->sem, 0);
     pool->max_threads = 64;
     if (ctx) {
@@ -316,6 +320,50 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
     }
 }
 
+ThreadPool *thread_pool_new(AioContext *ctx)
+{
+    ThreadPool *pool = g_new(ThreadPool, 1);
+    thread_pool_init_one(pool, ctx);
+    return pool;
+}
+
+void thread_pool_free(ThreadPool *pool)
+{
+    if (!pool) {
+        return;
+    }
+
+    assert(QLIST_EMPTY(&pool->head));
+
+    qemu_mutex_lock(&pool->lock);
+
+    /* Stop new threads from spawning */
+    qemu_bh_delete(pool->new_thread_bh);
+    pool->cur_threads -= pool->new_threads;
+    pool->new_threads = 0;
+
+    /* Wait for worker threads to terminate */
+    pool->stopping = true;
+    while (pool->cur_threads > 0) {
+        qemu_sem_post(&pool->sem);
+        qemu_cond_wait(&pool->worker_stopped, &pool->lock);
+    }
+
+    qemu_mutex_unlock(&pool->lock);
+
+    if (pool->ctx) {
+        aio_set_event_notifier(pool->ctx, &pool->notifier, NULL, NULL);
+    } else {
+        qemu_aio_set_event_notifier(&pool->notifier, NULL, NULL);
+    }
+    qemu_sem_destroy(&pool->sem);
+    qemu_cond_destroy(&pool->check_cancel);
+    qemu_cond_destroy(&pool->worker_stopped);
+    qemu_mutex_destroy(&pool->lock);
+    event_notifier_cleanup(&pool->notifier);
+    g_free(pool);
+}
+
 static void thread_pool_init(void)
 {
     thread_pool_init_one(&global_pool, NULL);
-- 
1.8.1.4
