From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: Paolo Bonzini <pbonzini@redhat.com>,
Christian Borntraeger <borntraeger@de.ibm.com>
Subject: [Qemu-devel] [PATCH for-2.1? 1/2] thread-pool: avoid per-thread-pool EventNotifier
Date: Fri, 11 Jul 2014 13:20:11 +0200
Message-ID: <1405077612-7806-2-git-send-email-stefanha@redhat.com>
In-Reply-To: <1405077612-7806-1-git-send-email-stefanha@redhat.com>

EventNotifier is implemented using an eventfd or a pipe.  It therefore
consumes file descriptors, which are a limited resource (subject to
rlimits), so EventNotifiers should be used sparingly.

Switch from EventNotifier to QEMUBH in thread-pool.c so that each thread
pool no longer ties up a file descriptor.  EventNotifier was originally
used because qemu_bh_schedule() was not yet thread-safe; now that it is,
a bottom half scheduled from the worker threads works just as well.
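
For illustration, the before/after notification pattern looks roughly like
the sketch below.  This is not thread-pool.c itself: MyPool and the
my_pool_* names are invented for the example, but the EventNotifier and
QEMUBH calls are the ones the patch swaps.

/* Illustrative only: MyPool and the my_pool_* functions are hypothetical;
 * the EventNotifier/QEMUBH calls are the real QEMU APIs swapped by this
 * patch.  Assumes the usual QEMU build environment and headers. */
#include "qemu/event_notifier.h"
#include "block/aio.h"

typedef struct MyPool {
    AioContext *ctx;
    EventNotifier notifier;   /* old scheme: one eventfd/pipe per pool */
    QEMUBH *completion_bh;    /* new scheme: no file descriptor at all */
} MyPool;

/* Completion work runs in the AioContext (main loop) thread. */
static void my_pool_complete(void *opaque)
{
    MyPool *pool = opaque;
    (void)pool;               /* ... reap finished requests here ... */
}

static void my_pool_notifier_ready(EventNotifier *e)
{
    MyPool *pool = container_of(e, MyPool, notifier);

    event_notifier_test_and_clear(e);
    my_pool_complete(pool);
}

/* Old scheme: consumes an fd and needs an aio handler registration. */
static void my_pool_init_old(MyPool *pool, AioContext *ctx)
{
    pool->ctx = ctx;
    event_notifier_init(&pool->notifier, false);
    aio_set_event_notifier(ctx, &pool->notifier, my_pool_notifier_ready);
}

/* New scheme: a bottom half created up front ... */
static void my_pool_init_new(MyPool *pool, AioContext *ctx)
{
    pool->ctx = ctx;
    pool->completion_bh = aio_bh_new(ctx, my_pool_complete, pool);
}

/* ... which a worker thread can schedule directly, because
 * qemu_bh_schedule() is thread-safe. */
static void my_pool_signal_completion(MyPool *pool)
{
    qemu_bh_schedule(pool->completion_bh);   /* was event_notifier_set() */
}

The point of the switch is that the QEMUBH variant needs no file descriptor
and no aio handler registration, yet can still be triggered safely from the
worker threads.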
Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
thread-pool.c | 21 ++++++++-------------
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/thread-pool.c b/thread-pool.c
index dfb699d..4cfd078 100644
--- a/thread-pool.c
+++ b/thread-pool.c
@@ -21,7 +21,6 @@
 #include "block/coroutine.h"
 #include "trace.h"
 #include "block/block_int.h"
-#include "qemu/event_notifier.h"
 #include "block/thread-pool.h"
 #include "qemu/main-loop.h"
 
@@ -57,8 +56,8 @@ struct ThreadPoolElement {
 };
 
 struct ThreadPool {
-    EventNotifier notifier;
     AioContext *ctx;
+    QEMUBH *completion_bh;
     QemuMutex lock;
     QemuCond check_cancel;
     QemuCond worker_stopped;
@@ -119,7 +118,7 @@ static void *worker_thread(void *opaque)
             qemu_cond_broadcast(&pool->check_cancel);
         }
 
-        event_notifier_set(&pool->notifier);
+        qemu_bh_schedule(pool->completion_bh);
     }
 
     pool->cur_threads--;
@@ -168,12 +167,11 @@ static void spawn_thread(ThreadPool *pool)
     }
 }
 
-static void event_notifier_ready(EventNotifier *notifier)
+static void thread_pool_completion_bh(void *opaque)
 {
-    ThreadPool *pool = container_of(notifier, ThreadPool, notifier);
+    ThreadPool *pool = opaque;
     ThreadPoolElement *elem, *next;
 
-    event_notifier_test_and_clear(notifier);
 restart:
     QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
         if (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) {
@@ -215,7 +213,7 @@ static void thread_pool_cancel(BlockDriverAIOCB *acb)
         qemu_sem_timedwait(&pool->sem, 0) == 0) {
         QTAILQ_REMOVE(&pool->request_list, elem, reqs);
         elem->state = THREAD_CANCELED;
-        event_notifier_set(&pool->notifier);
+        qemu_bh_schedule(pool->completion_bh);
     } else {
         pool->pending_cancellations++;
         while (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) {
@@ -224,7 +222,7 @@ static void thread_pool_cancel(BlockDriverAIOCB *acb)
         pool->pending_cancellations--;
     }
     qemu_mutex_unlock(&pool->lock);
-    event_notifier_ready(&pool->notifier);
+    thread_pool_completion_bh(pool);
 }
 
 static const AIOCBInfo thread_pool_aiocb_info = {
@@ -293,8 +291,8 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
     }
 
     memset(pool, 0, sizeof(*pool));
-    event_notifier_init(&pool->notifier, false);
     pool->ctx = ctx;
+    pool->completion_bh = aio_bh_new(ctx, thread_pool_completion_bh, pool);
     qemu_mutex_init(&pool->lock);
     qemu_cond_init(&pool->check_cancel);
     qemu_cond_init(&pool->worker_stopped);
@@ -304,8 +302,6 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
 
     QLIST_INIT(&pool->head);
     QTAILQ_INIT(&pool->request_list);
-
-    aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready);
 }
 
 ThreadPool *thread_pool_new(AioContext *ctx)
@@ -339,11 +335,10 @@ void thread_pool_free(ThreadPool *pool)
 
     qemu_mutex_unlock(&pool->lock);
 
-    aio_set_event_notifier(pool->ctx, &pool->notifier, NULL);
+    qemu_bh_delete(pool->completion_bh);
     qemu_sem_destroy(&pool->sem);
    qemu_cond_destroy(&pool->check_cancel);
     qemu_cond_destroy(&pool->worker_stopped);
     qemu_mutex_destroy(&pool->lock);
-    event_notifier_cleanup(&pool->notifier);
     g_free(pool);
 }
--
1.9.3