From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: famz@redhat.com, stefanha@redhat.com
Subject: [Qemu-devel] [PATCH 3/3] iothread: release iothread around aio_poll
Date: Fri, 20 Feb 2015 17:26:52 +0100
Message-ID: <1424449612-18215-4-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1424449612-18215-1-git-send-email-pbonzini@redhat.com>
This is the first step towards having fine-grained critical sections in
dataplane threads, which resolves lock ordering problems between
address_space_* functions (which need the BQL when doing MMIO, even
after we complete RCU-based dispatch) and the AioContext.
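For illustration only, here is a stand-alone model of the new locking scheme
(hypothetical names, plain pthreads rather than QEMU code): the polling thread
blocks *without* holding the context lock, so another thread can simply take
the lock instead of first kicking the poller through a contention callback.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER; /* models AioContext.lock */
    static atomic_bool stopping;

    /* Models aio_poll(ctx, true): blocks waiting for events, and
     * crucially does so without holding ctx_lock. */
    static void fake_aio_poll(void)
    {
        usleep(10 * 1000);
    }

    static void *iothread_run_model(void *opaque)
    {
        (void)opaque;
        while (!atomic_load(&stopping)) {
            fake_aio_poll();
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;
        pthread_create(&tid, NULL, iothread_run_model, NULL);

        /* Another thread (e.g. the main loop) enters its critical section
         * right away; with the old loop it first had to wake the poller via
         * the RFifoLock contention callback, because the iothread sat in
         * aio_poll() with the context lock held. */
        pthread_mutex_lock(&ctx_lock);
        printf("entered critical section without kicking the poller\n");
        pthread_mutex_unlock(&ctx_lock);

        atomic_store(&stopping, true);
        pthread_join(tid, NULL);
        return 0;
    }

In QEMU itself the dispatch side still takes the lock (see patch 2/3,
"AioContext: acquire/release AioContext during aio_poll"); the model above
only shows the iothread side of the change.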
Because AioContext no longer uses the RFifoLock contention callback to kick a
thread that is blocked in aio_poll(), the unit test has to wake the poller
explicitly with an event notifier.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 async.c          |  8 +-------
 iothread.c       | 11 ++---------
 tests/test-aio.c | 21 ++++++++++++---------
 3 files changed, 15 insertions(+), 25 deletions(-)
diff --git a/async.c b/async.c
index 0463dc4..e8a4c8b 100644
--- a/async.c
+++ b/async.c
@@ -289,12 +289,6 @@ static void aio_timerlist_notify(void *opaque)
aio_notify(opaque);
}
-static void aio_rfifolock_cb(void *opaque)
-{
- /* Kick owner thread in case they are blocked in aio_poll() */
- aio_notify(opaque);
-}
-
AioContext *aio_context_new(Error **errp)
{
int ret;
@@ -312,7 +306,7 @@ AioContext *aio_context_new(Error **errp)
event_notifier_test_and_clear);
ctx->thread_pool = NULL;
qemu_mutex_init(&ctx->bh_lock);
- rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
+ rfifolock_init(&ctx->lock, NULL, NULL);
timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
return ctx;
diff --git a/iothread.c b/iothread.c
index 342a23f..a1f9109 100644
--- a/iothread.c
+++ b/iothread.c
@@ -31,21 +31,14 @@ typedef ObjectClass IOThreadClass;
static void *iothread_run(void *opaque)
{
IOThread *iothread = opaque;
- bool blocking;
qemu_mutex_lock(&iothread->init_done_lock);
iothread->thread_id = qemu_get_thread_id();
qemu_cond_signal(&iothread->init_done_cond);
qemu_mutex_unlock(&iothread->init_done_lock);
- while (!iothread->stopping) {
- aio_context_acquire(iothread->ctx);
- blocking = true;
- while (!iothread->stopping && aio_poll(iothread->ctx, blocking)) {
- /* Progress was made, keep going */
- blocking = false;
- }
- aio_context_release(iothread->ctx);
+ while (!atomic_read(&iothread->stopping)) {
+ aio_poll(iothread->ctx, true);
}
return NULL;
}
diff --git a/tests/test-aio.c b/tests/test-aio.c
index a7cb5c9..f88a042 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -107,6 +107,7 @@ static void test_notify(void)
typedef struct {
QemuMutex start_lock;
+ EventNotifier notifier;
bool thread_acquired;
} AcquireTestData;
@@ -118,6 +119,8 @@ static void *test_acquire_thread(void *opaque)
qemu_mutex_lock(&data->start_lock);
qemu_mutex_unlock(&data->start_lock);
+ g_usleep(500000);
+ event_notifier_set(&data->notifier);
aio_context_acquire(ctx);
aio_context_release(ctx);
@@ -126,20 +129,19 @@ static void *test_acquire_thread(void *opaque)
return NULL;
}
-static void dummy_notifier_read(EventNotifier *unused)
+static void dummy_notifier_read(EventNotifier *n)
{
- g_assert(false); /* should never be invoked */
+ event_notifier_test_and_clear(n);
}
static void test_acquire(void)
{
QemuThread thread;
- EventNotifier notifier;
AcquireTestData data;
/* Dummy event notifier ensures aio_poll() will block */
- event_notifier_init(&notifier, false);
- aio_set_event_notifier(ctx, &notifier, dummy_notifier_read);
+ event_notifier_init(&data.notifier, false);
+ aio_set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */
qemu_mutex_init(&data.start_lock);
@@ -150,12 +152,13 @@ static void test_acquire(void)
/* Block in aio_poll(), let other thread kick us and acquire context */
aio_context_acquire(ctx);
qemu_mutex_unlock(&data.start_lock); /* let the thread run */
- g_assert(!aio_poll(ctx, true));
+ g_assert(aio_poll(ctx, true));
+ g_assert(!data.thread_acquired);
aio_context_release(ctx);
qemu_thread_join(&thread);
- aio_set_event_notifier(ctx, &notifier, NULL);
- event_notifier_cleanup(&notifier);
+ aio_set_event_notifier(ctx, &data.notifier, NULL);
+ event_notifier_cleanup(&data.notifier);
g_assert(data.thread_acquired);
}
--
2.3.0