From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: stefanha@redhat.com, famz@redhat.com
Subject: [Qemu-devel] [PATCH 08/10] aio-win32: remove walking_handlers, protecting AioHandler list with list_lock
Date: Wed, 4 Jan 2017 14:26:23 +0100
Message-ID: <20170104132625.28059-9-pbonzini@redhat.com>
In-Reply-To: <20170104132625.28059-1-pbonzini@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
aio-win32.c | 73 +++++++++++++++++++++++++++++++++++++++----------------------
1 file changed, 47 insertions(+), 26 deletions(-)
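Note for reviewers: the sketch below is not part of the patch; it is a minimal illustration of the QemuLockCnt idiom that this change applies to ctx->aio_handlers, written against the same qemu_lockcnt_*() and QLIST_*_RCU APIs the diff uses. The Handler type and the walk_handlers()/remove_handler() helpers are invented names for illustration only.

/*
 * Background sketch (not QEMU code): readers count themselves in and
 * out around an RCU-style list walk; writers take the lock and defer
 * freeing while any reader is inside.
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/queue.h"
#include "qemu/rcu_queue.h"

typedef struct Handler {
    int deleted;
    QLIST_ENTRY(Handler) next;
} Handler;

static QemuLockCnt list_lock;   /* initialized elsewhere with qemu_lockcnt_init() */
static QLIST_HEAD(, Handler) handlers;

/* Reader: count ourselves in, then walk without holding any mutex. */
static void walk_handlers(void)
{
    Handler *h, *tmp;

    qemu_lockcnt_inc(&list_lock);
    QLIST_FOREACH_SAFE_RCU(h, &handlers, next, tmp) {
        if (h->deleted) {
            /*
             * Reap a deferred deletion only if we are the last reader:
             * qemu_lockcnt_dec_if_lock() decrements the count and takes
             * the lock if no other reader remains, otherwise it leaves
             * the count alone and returns false.
             */
            if (qemu_lockcnt_dec_if_lock(&list_lock)) {
                QLIST_REMOVE(h, next);
                g_free(h);
                qemu_lockcnt_inc_and_unlock(&list_lock);
            }
        }
    }
    qemu_lockcnt_dec(&list_lock);
}

/* Writer: take the lock; free immediately only if no reader is inside. */
static void remove_handler(Handler *h)
{
    qemu_lockcnt_lock(&list_lock);
    if (qemu_lockcnt_count(&list_lock)) {
        h->deleted = 1;         /* a concurrent walker frees it later */
    } else {
        QLIST_REMOVE(h, next);
        g_free(h);
    }
    qemu_lockcnt_unlock(&list_lock);
}

The point is that removal is deferred to whichever walker leaves last, so aio_dispatch_handlers and friends can traverse the list without holding a mutex, and g_free() can never race with a concurrent walk.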
diff --git a/aio-win32.c b/aio-win32.c
index 1ad459d..900524c 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -21,6 +21,7 @@
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
+#include "qemu/rcu_queue.h"
struct AioHandler {
EventNotifier *e;
@@ -45,6 +46,7 @@ void aio_set_fd_handler(AioContext *ctx,
/* fd is a SOCKET in our case */
AioHandler *node;
+ qemu_lockcnt_lock(&ctx->list_lock);
QLIST_FOREACH(node, &ctx->aio_handlers, node) {
if (node->pfd.fd == fd && !node->deleted) {
break;
@@ -54,14 +56,14 @@ void aio_set_fd_handler(AioContext *ctx,
/* Are we deleting the fd handler? */
if (!io_read && !io_write) {
if (node) {
- /* If the lock is held, just mark the node as deleted */
- if (ctx->walking_handlers) {
+ /* If aio_poll is in progress, just mark the node as deleted */
+ if (qemu_lockcnt_count(&ctx->list_lock)) {
node->deleted = 1;
node->pfd.revents = 0;
} else {
/* Otherwise, delete it for real. We can't just mark it as
* deleted because deleted nodes are only cleaned up after
- * releasing the walking_handlers lock.
+ * releasing the list_lock.
*/
QLIST_REMOVE(node, node);
g_free(node);
@@ -74,7 +76,7 @@ void aio_set_fd_handler(AioContext *ctx,
/* Alloc and insert if it's not already there */
node = g_new0(AioHandler, 1);
node->pfd.fd = fd;
- QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
+ QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
}
node->pfd.events = 0;
@@ -99,6 +101,7 @@ void aio_set_fd_handler(AioContext *ctx,
FD_CONNECT | FD_WRITE | FD_OOB);
}
+ qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
}
@@ -117,6 +120,7 @@ void aio_set_event_notifier(AioContext *ctx,
{
AioHandler *node;
+ qemu_lockcnt_lock(&ctx->list_lock);
QLIST_FOREACH(node, &ctx->aio_handlers, node) {
if (node->e == e && !node->deleted) {
break;
@@ -128,14 +132,14 @@ void aio_set_event_notifier(AioContext *ctx,
if (node) {
g_source_remove_poll(&ctx->source, &node->pfd);
- /* If the lock is held, just mark the node as deleted */
- if (ctx->walking_handlers) {
+ /* If aio_poll is in progress, just mark the node as deleted */
+ if (qemu_lockcnt_count(&ctx->list_lock)) {
node->deleted = 1;
node->pfd.revents = 0;
} else {
/* Otherwise, delete it for real. We can't just mark it as
* deleted because deleted nodes are only cleaned up after
- * releasing the walking_handlers lock.
+ * releasing the list_lock.
*/
QLIST_REMOVE(node, node);
g_free(node);
@@ -149,7 +153,7 @@ void aio_set_event_notifier(AioContext *ctx,
node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
node->pfd.events = G_IO_IN;
node->is_external = is_external;
- QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
+ QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
g_source_add_poll(&ctx->source, &node->pfd);
}
@@ -157,6 +161,7 @@ void aio_set_event_notifier(AioContext *ctx,
node->io_notify = io_notify;
}
+ qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
}
@@ -175,10 +180,16 @@ bool aio_prepare(AioContext *ctx)
bool have_select_revents = false;
fd_set rfds, wfds;
+ /*
+ * We have to walk very carefully in case aio_set_fd_handler is
+ * called while we're walking.
+ */
+ qemu_lockcnt_inc(&ctx->list_lock);
+
/* fill fd sets */
FD_ZERO(&rfds);
FD_ZERO(&wfds);
- QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (node->io_read) {
FD_SET ((SOCKET)node->pfd.fd, &rfds);
}
@@ -188,7 +199,7 @@ bool aio_prepare(AioContext *ctx)
}
if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
- QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
node->pfd.revents = 0;
if (FD_ISSET(node->pfd.fd, &rfds)) {
node->pfd.revents |= G_IO_IN;
@@ -202,41 +213,53 @@ bool aio_prepare(AioContext *ctx)
}
}
+ qemu_lockcnt_dec(&ctx->list_lock);
return have_select_revents;
}
bool aio_pending(AioContext *ctx)
{
AioHandler *node;
+ bool result = false;
- QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ /*
+ * We have to walk very carefully in case aio_set_fd_handler is
+ * called while we're walking.
+ */
+ qemu_lockcnt_inc(&ctx->list_lock);
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (node->pfd.revents && node->io_notify) {
- return true;
+ result = true;
+ break;
}
if ((node->pfd.revents & G_IO_IN) && node->io_read) {
- return true;
+ result = true;
+ break;
}
if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
- return true;
+ result = true;
+ break;
}
}
- return false;
+ qemu_lockcnt_dec(&ctx->list_lock);
+ return result;
}
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
- AioHandler *node, *tmp;
+ AioHandler *node;
bool progress = false;
+ AioHandler *tmp;
- ctx->walking_handlers++;
+ qemu_lockcnt_inc(&ctx->list_lock);
/*
* We have to walk very carefully in case aio_set_fd_handler is
* called while we're walking.
*/
- QLIST_FOREACH_SAFE(node, &ctx->aio_handlers, node, tmp) {
+ QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
int revents = node->pfd.revents;
if (!node->deleted &&
@@ -274,16 +297,15 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
}
if (node->deleted) {
- ctx->walking_handlers--;
- if (!ctx->walking_handlers) {
+ if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
QLIST_REMOVE(node, node);
g_free(node);
+ qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
}
- ctx->walking_handlers++;
}
}
- ctx->walking_handlers--;
+ qemu_lockcnt_dec(&ctx->list_lock);
return progress;
}
@@ -321,20 +343,19 @@ bool aio_poll(AioContext *ctx, bool blocking)
atomic_add(&ctx->notify_me, 2);
}
+ qemu_lockcnt_inc(&ctx->list_lock);
have_select_revents = aio_prepare(ctx);
- ctx->walking_handlers++;
-
/* fill fd sets */
count = 0;
- QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (!node->deleted && node->io_notify
&& aio_node_check(ctx, node->is_external)) {
events[count++] = event_notifier_get_handle(node->e);
}
}
- ctx->walking_handlers--;
+ qemu_lockcnt_dec(&ctx->list_lock);
first = true;
/* ctx->notifier is always registered. */
--
2.9.3
Thread overview: 26+ messages
2017-01-04 13:26 [Qemu-devel] [PATCH v3 00/10] aio_context_acquire/release pushdown, part 1 Paolo Bonzini
2017-01-04 13:26 ` [Qemu-devel] [PATCH 01/10] aio: rename bh_lock to list_lock Paolo Bonzini
2017-01-04 13:26 ` [Qemu-devel] [PATCH 02/10] qemu-thread: introduce QemuLockCnt Paolo Bonzini
2017-01-11 15:48 ` Fam Zheng
2017-01-11 16:09 ` Paolo Bonzini
2017-01-11 16:35 ` Stefan Hajnoczi
2017-01-11 16:51 ` Paolo Bonzini
2017-01-11 16:56 ` Stefan Hajnoczi
2017-01-04 13:26 ` [Qemu-devel] [PATCH 03/10] aio: make ctx->list_lock a QemuLockCnt, subsuming ctx->walking_bh Paolo Bonzini
2017-01-04 13:26 ` [Qemu-devel] [PATCH 04/10] qemu-thread: optimize QemuLockCnt with futexes on Linux Paolo Bonzini
2017-01-11 16:50 ` Stefan Hajnoczi
2017-01-11 16:52 ` Paolo Bonzini
2017-01-12 13:34 ` Fam Zheng
2017-01-12 15:40 ` Paolo Bonzini
2017-01-04 13:26 ` [Qemu-devel] [PATCH 05/10] aio-posix: split aio_dispatch_handlers out of aio_dispatch Paolo Bonzini
2017-01-11 16:57 ` Stefan Hajnoczi
2017-01-04 13:26 ` [Qemu-devel] [PATCH 06/10] aio: tweak walking in dispatch phase Paolo Bonzini
2017-01-04 13:26 ` [Qemu-devel] [PATCH 07/10] aio-posix: remove walking_handlers, protecting AioHandler list with list_lock Paolo Bonzini
2017-01-11 17:01 ` Stefan Hajnoczi
2017-01-04 13:26 ` [Qemu-devel] [PATCH 08/10] aio-win32: remove walking_handlers, protecting AioHandler list with list_lock Paolo Bonzini [this message]
2017-01-04 13:26 ` [Qemu-devel] [PATCH 09/10] aio: document locking Paolo Bonzini
2017-01-04 13:26 ` [Qemu-devel] [PATCH 10/10] async: optimize aio_bh_poll Paolo Bonzini
2017-01-10 22:22 ` [Qemu-devel] [PATCH v3 00/10] aio_context_acquire/release pushdown, part 1 Paolo Bonzini