From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH 6/7] aio: simplify qemu_aio_wait
Date: Mon, 12 Mar 2012 19:22:27 +0100
Message-ID: <1331576548-23067-7-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1331576548-23067-1-git-send-email-pbonzini@redhat.com>
The do...while loop can never loop. select() does not return 0 when
invoked with an infinite (NULL) timeout, and the EINTR case cannot
iterate either: "continue" inside a do...while jumps to the controlling
expression, which is false because ret is -1. Drop the loop and the
dead EINTR handling.
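As a standalone illustration (not part of the patch) of the two facts
this relies on: with a NULL timeout, select(2) blocks until a descriptor
is ready or a signal arrives, so it returns a positive count or -1 but
never 0; and "continue" inside do...while jumps to the controlling
expression. A minimal sketch, with stdin standing in for an AIO fd:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/select.h>

    int main(void)
    {
        fd_set rdfds;
        int ret;

        do {
            FD_ZERO(&rdfds);
            FD_SET(0, &rdfds);   /* stdin stands in for an AIO fd */

            /* NULL timeout: select() blocks until an fd is ready
             * (ret > 0) or a signal interrupts it (ret == -1 with
             * errno == EINTR); it can never return 0 here. */
            ret = select(1, &rdfds, NULL, NULL, NULL);
            if (ret == -1 && errno == EINTR) {
                continue;   /* jumps to the while condition below... */
            }
            printf("select() returned %d\n", ret);
        } while (ret == 0);     /* ...which is false when ret == -1 */

        return 0;
    }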
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
aio.c | 133 +++++++++++++++++++++++++++++++---------------------------------
1 files changed, 64 insertions(+), 69 deletions(-)
diff --git a/aio.c b/aio.c
index 71264fc..2d82dfe 100644
--- a/aio.c
+++ b/aio.c
@@ -108,7 +108,11 @@ void qemu_aio_flush(void)
 bool qemu_aio_wait(void)
 {
+    AioHandler *node;
+    fd_set rdfds, wrfds;
+    int max_fd = -1;
     int ret;
+    bool busy;
 
     /*
      * If there are callbacks left that have been queued, we need to call them.
@@ -119,85 +123,76 @@ bool qemu_aio_wait(void)
         return true;
     }
 
-    do {
-        AioHandler *node;
-        fd_set rdfds, wrfds;
-        bool busy;
-        int max_fd = -1;
+    walking_handlers = 1;
 
-        walking_handlers = 1;
+    FD_ZERO(&rdfds);
+    FD_ZERO(&wrfds);
 
-        FD_ZERO(&rdfds);
-        FD_ZERO(&wrfds);
-
-        /* fill fd sets */
-        busy = false;
-        QLIST_FOREACH(node, &aio_handlers, node) {
-            /* If there aren't pending AIO operations, don't invoke callbacks.
-             * Otherwise, if there are no AIO requests, qemu_aio_wait() would
-             * wait indefinitely.
-             */
-            if (node->io_flush) {
-                if (node->io_flush(node->opaque) == 0) {
-                    continue;
-                }
-                busy = true;
-            }
-            if (!node->deleted && node->io_read) {
-                FD_SET(node->fd, &rdfds);
-                max_fd = MAX(max_fd, node->fd + 1);
-            }
-            if (!node->deleted && node->io_write) {
-                FD_SET(node->fd, &wrfds);
-                max_fd = MAX(max_fd, node->fd + 1);
+    /* fill fd sets */
+    busy = false;
+    QLIST_FOREACH(node, &aio_handlers, node) {
+        /* If there aren't pending AIO operations, don't invoke callbacks.
+         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
+         * wait indefinitely.
+         */
+        if (node->io_flush) {
+            if (node->io_flush(node->opaque) == 0) {
+                continue;
            }
+            busy = true;
+        }
+        if (!node->deleted && node->io_read) {
+            FD_SET(node->fd, &rdfds);
+            max_fd = MAX(max_fd, node->fd + 1);
         }
+        if (!node->deleted && node->io_write) {
+            FD_SET(node->fd, &wrfds);
+            max_fd = MAX(max_fd, node->fd + 1);
+        }
+    }
 
-        walking_handlers = 0;
+    walking_handlers = 0;
 
-        /* No AIO operations? Get us out of here */
-        if (!busy) {
-            return false;
-        }
+    /* No AIO operations? Get us out of here */
+    if (!busy) {
+        return false;
+    }
 
-        /* wait until next event */
-        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
-        if (ret == -1 && errno == EINTR)
-            continue;
-
-        /* if we have any readable fds, dispatch event */
-        if (ret > 0) {
-            walking_handlers = 1;
-
-            /* we have to walk very carefully in case
-             * qemu_aio_set_fd_handler is called while we're walking */
-            node = QLIST_FIRST(&aio_handlers);
-            while (node) {
-                AioHandler *tmp;
-
-                if (!node->deleted &&
-                    FD_ISSET(node->fd, &rdfds) &&
-                    node->io_read) {
-                    node->io_read(node->opaque);
-                }
-                if (!node->deleted &&
-                    FD_ISSET(node->fd, &wrfds) &&
-                    node->io_write) {
-                    node->io_write(node->opaque);
-                }
-
-                tmp = node;
-                node = QLIST_NEXT(node, node);
-
-                if (tmp->deleted) {
-                    QLIST_REMOVE(tmp, node);
-                    g_free(tmp);
-                }
+    /* wait until next event */
+    ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
+
+    /* if we have any readable fds, dispatch event */
+    if (ret > 0) {
+        walking_handlers = 1;
+
+        /* we have to walk very carefully in case
+         * qemu_aio_set_fd_handler is called while we're walking */
+        node = QLIST_FIRST(&aio_handlers);
+        while (node) {
+            AioHandler *tmp;
+
+            if (!node->deleted &&
+                FD_ISSET(node->fd, &rdfds) &&
+                node->io_read) {
+                node->io_read(node->opaque);
+            }
+            if (!node->deleted &&
+                FD_ISSET(node->fd, &wrfds) &&
+                node->io_write) {
+                node->io_write(node->opaque);
             }
-            walking_handlers = 0;
+            tmp = node;
+            node = QLIST_NEXT(node, node);
+
+            if (tmp->deleted) {
+                QLIST_REMOVE(tmp, node);
+                g_free(tmp);
+            }
         }
-    } while (ret == 0);
+
+        walking_handlers = 0;
+    }
 
     return true;
 }
--
1.7.7.6
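For context: patch 5/7 in this series made qemu_aio_wait() return
whether any AIO request was pending, so a caller can drain outstanding
requests with a plain loop. A minimal sketch of such a caller;
drain_all_aio() is a hypothetical name, not a function in the QEMU tree:

    /* Loop until qemu_aio_wait() reports that no AIO request was
     * pending (it returns false); at that point every request queued
     * before the call has completed. */
    static void drain_all_aio(void)
    {
        while (qemu_aio_wait()) {
            /* a callback ran or a request is still in flight; wait again */
        }
    }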