From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Paolo Bonzini <pbonzini@redhat.com>
Subject: [Qemu-devel] [PULL 21/35] aio-win32: add support for sockets
Date: Fri, 29 Aug 2014 17:29:49 +0100
Message-ID: <1409329803-20744-22-git-send-email-stefanha@redhat.com>
In-Reply-To: <1409329803-20744-1-git-send-email-stefanha@redhat.com>

From: Paolo Bonzini <pbonzini@redhat.com>

Uses the same select/WSAEventSelect scheme as main-loop.c.
WSAEventSelect() is edge-triggered, so it cannot be used
directly for readiness polling; it is used only as a way to
exit from a blocking g_poll().
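
For readers unfamiliar with the Winsock side of this, here is a minimal
standalone sketch of the wakeup half of the scheme.  It is not code from
this patch: the function name is made up, and WSAStartup()/error handling
are omitted.  It only shows how a socket is associated with an event
handle so that a blocking wait returns when socket activity occurs.

/* Illustrative sketch (not QEMU code): bind a socket to an event handle
 * so a blocking wait wakes up on socket activity.  "sock" is assumed to
 * be a valid SOCKET on an already-initialized Winsock stack.
 */
#include <winsock2.h>
#include <windows.h>

static void wait_for_socket_activity(SOCKET sock)
{
    WSAEVENT event = WSACreateEvent();

    /* Edge-triggered: the event is signalled on state changes only. */
    WSAEventSelect(sock, event, FD_READ | FD_WRITE | FD_CLOSE);

    /* The blocking wait returns once the socket becomes active, but it
     * does not say what is ready now; that is what the level-triggered
     * select() pass described below is for.
     */
    WaitForSingleObject(event, INFINITE);

    WSACloseEvent(event);
}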

Before g_poll() is called, we poll sockets with a non-blocking
select() to achieve the level-triggered semantics we require:
if a socket is ready, the g_poll() is made non-blocking too.
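
And the level-triggered half, again as a standalone sketch with a
hypothetical helper name rather than the patch's actual aio_prepare()
code: a select() with a zero timeout reports what is ready right now,
so the caller can make the following g_poll()/wait non-blocking.

/* Illustrative sketch (not QEMU code): a zero-timeout select() gives the
 * level-triggered view that WSAEventSelect() cannot.  Returns true if the
 * socket is readable or writable right now.
 */
#include <winsock2.h>
#include <stdbool.h>

static bool socket_ready_now(SOCKET sock)
{
    fd_set rfds, wfds;
    struct timeval tv0 = { 0, 0 };   /* zero timeout: do not block */

    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_SET(sock, &rfds);
    FD_SET(sock, &wfds);

    /* On Winsock the first argument to select() is ignored. */
    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        return FD_ISSET(sock, &rfds) || FD_ISSET(sock, &wfds);
    }
    return false;
}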

Based on a patch from Or Goshen.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 aio-win32.c         | 150 ++++++++++++++++++++++++++++++++++++++++++++++++++--
 block/Makefile.objs |   2 -
 include/block/aio.h |   2 -
 3 files changed, 145 insertions(+), 9 deletions(-)

diff --git a/aio-win32.c b/aio-win32.c
index 4542270..61e3d2d 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -22,12 +22,80 @@
 
 struct AioHandler {
     EventNotifier *e;
+    IOHandler *io_read;
+    IOHandler *io_write;
     EventNotifierHandler *io_notify;
     GPollFD pfd;
     int deleted;
+    void *opaque;
     QLIST_ENTRY(AioHandler) node;
 };
 
+void aio_set_fd_handler(AioContext *ctx,
+                        int fd,
+                        IOHandler *io_read,
+                        IOHandler *io_write,
+                        void *opaque)
+{
+    /* fd is a SOCKET in our case */
+    AioHandler *node;
+
+    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+        if (node->pfd.fd == fd && !node->deleted) {
+            break;
+        }
+    }
+
+    /* Are we deleting the fd handler? */
+    if (!io_read && !io_write) {
+        if (node) {
+            /* If the lock is held, just mark the node as deleted */
+            if (ctx->walking_handlers) {
+                node->deleted = 1;
+                node->pfd.revents = 0;
+            } else {
+                /* Otherwise, delete it for real.  We can't just mark it as
+                 * deleted because deleted nodes are only cleaned up after
+                 * releasing the walking_handlers lock.
+                 */
+                QLIST_REMOVE(node, node);
+                g_free(node);
+            }
+        }
+    } else {
+        HANDLE event;
+
+        if (node == NULL) {
+            /* Alloc and insert if it's not already there */
+            node = g_malloc0(sizeof(AioHandler));
+            node->pfd.fd = fd;
+            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
+        }
+
+        node->pfd.events = 0;
+        if (node->io_read) {
+            node->pfd.events |= G_IO_IN;
+        }
+        if (node->io_write) {
+            node->pfd.events |= G_IO_OUT;
+        }
+
+        node->e = &ctx->notifier;
+
+        /* Update handler with latest information */
+        node->opaque = opaque;
+        node->io_read = io_read;
+        node->io_write = io_write;
+
+        event = event_notifier_get_handle(&ctx->notifier);
+        WSAEventSelect(node->pfd.fd, event,
+                       FD_READ | FD_ACCEPT | FD_CLOSE |
+                       FD_CONNECT | FD_WRITE | FD_OOB);
+    }
+
+    aio_notify(ctx);
+}
+
 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *e,
                             EventNotifierHandler *io_notify)
@@ -78,7 +146,39 @@ void aio_set_event_notifier(AioContext *ctx,
 
 bool aio_prepare(AioContext *ctx)
 {
-    return false;
+    static struct timeval tv0;
+    AioHandler *node;
+    bool have_select_revents = false;
+    fd_set rfds, wfds;
+
+    /* fill fd sets */
+    FD_ZERO(&rfds);
+    FD_ZERO(&wfds);
+    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+        if (node->io_read) {
+            FD_SET ((SOCKET)node->pfd.fd, &rfds);
+        }
+        if (node->io_write) {
+            FD_SET ((SOCKET)node->pfd.fd, &wfds);
+        }
+    }
+
+    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
+        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+            node->pfd.revents = 0;
+            if (FD_ISSET(node->pfd.fd, &rfds)) {
+                node->pfd.revents |= G_IO_IN;
+                have_select_revents = true;
+            }
+
+            if (FD_ISSET(node->pfd.fd, &wfds)) {
+                node->pfd.revents |= G_IO_OUT;
+                have_select_revents = true;
+            }
+        }
+    }
+
+    return have_select_revents;
 }
 
 bool aio_pending(AioContext *ctx)
@@ -89,6 +189,13 @@ bool aio_pending(AioContext *ctx)
         if (node->pfd.revents && node->io_notify) {
             return true;
         }
+
+        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
+            return true;
+        }
+        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
+            return true;
+        }
     }
 
     return false;
@@ -106,11 +213,12 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
     node = QLIST_FIRST(&ctx->aio_handlers);
     while (node) {
         AioHandler *tmp;
+        int revents = node->pfd.revents;
 
         ctx->walking_handlers++;
 
         if (!node->deleted &&
-            (node->pfd.revents || event_notifier_get_handle(node->e) == event) &&
+            (revents || event_notifier_get_handle(node->e) == event) &&
             node->io_notify) {
             node->pfd.revents = 0;
             node->io_notify(node->e);
@@ -121,6 +229,28 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
             }
         }
 
+        if (!node->deleted &&
+            (node->io_read || node->io_write)) {
+            node->pfd.revents = 0;
+            if ((revents & G_IO_IN) && node->io_read) {
+                node->io_read(node->opaque);
+                progress = true;
+            }
+            if ((revents & G_IO_OUT) && node->io_write) {
+                node->io_write(node->opaque);
+                progress = true;
+            }
+
+            /* if the next select() will return an event, we have progressed */
+            if (event == event_notifier_get_handle(&ctx->notifier)) {
+                WSANETWORKEVENTS ev;
+                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
+                if (ev.lNetworkEvents) {
+                    progress = true;
+                }
+            }
+        }
+
         tmp = node;
         node = QLIST_NEXT(node, node);
 
@@ -149,10 +279,15 @@ bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
-    bool was_dispatching, progress, first;
+    bool was_dispatching, progress, have_select_revents, first;
     int count;
     int timeout;
 
+    if (aio_prepare(ctx)) {
+        blocking = false;
+        have_select_revents = true;
+    }
+
     was_dispatching = ctx->dispatching;
     progress = false;
 
@@ -183,6 +318,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     /* wait until next event */
     while (count > 0) {
+        HANDLE event;
         int ret;
 
         timeout = blocking
@@ -196,13 +332,17 @@ bool aio_poll(AioContext *ctx, bool blocking)
         first = false;
 
         /* if we have any signaled events, dispatch event */
-        if ((DWORD) (ret - WAIT_OBJECT_0) >= count) {
+        event = NULL;
+        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
+            event = events[ret - WAIT_OBJECT_0];
+        } else if (!have_select_revents) {
             break;
         }
 
+        have_select_revents = false;
         blocking = false;
 
-        progress |= aio_dispatch_handlers(ctx, events[ret - WAIT_OBJECT_0]);
+        progress |= aio_dispatch_handlers(ctx, event);
 
         /* Try again, but only call each handler once.  */
         events[ret - WAIT_OBJECT_0] = events[--count];
diff --git a/block/Makefile.objs b/block/Makefile.objs
index 858d2b3..f45f939 100644
--- a/block/Makefile.objs
+++ b/block/Makefile.objs
@@ -10,7 +10,6 @@ block-obj-$(CONFIG_WIN32) += raw-win32.o win32-aio.o
 block-obj-$(CONFIG_POSIX) += raw-posix.o
 block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
 
-ifeq ($(CONFIG_POSIX),y)
 block-obj-y += nbd.o nbd-client.o sheepdog.o
 block-obj-$(CONFIG_LIBISCSI) += iscsi.o
 block-obj-$(CONFIG_LIBNFS) += nfs.o
@@ -19,7 +18,6 @@ block-obj-$(CONFIG_RBD) += rbd.o
 block-obj-$(CONFIG_GLUSTERFS) += gluster.o
 block-obj-$(CONFIG_ARCHIPELAGO) += archipelago.o
 block-obj-$(CONFIG_LIBSSH2) += ssh.o
-endif
 
 common-obj-y += stream.o
 common-obj-y += commit.o
diff --git a/include/block/aio.h b/include/block/aio.h
index ef4197b..4603c0f 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -239,7 +239,6 @@ bool aio_dispatch(AioContext *ctx);
  */
 bool aio_poll(AioContext *ctx, bool blocking);
 
-#ifdef CONFIG_POSIX
 /* Register a file descriptor and associated callbacks.  Behaves very similarly
  * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
  * be invoked when using aio_poll().
@@ -252,7 +251,6 @@ void aio_set_fd_handler(AioContext *ctx,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         void *opaque);
-#endif
 
 /* Register an event notifier and associated callbacks.  Behaves very similarly
  * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these callbacks
-- 
1.9.3
