From: Fam Zheng <famz@redhat.com>
To: qemu-devel@nongnu.org
Cc: kwolf@redhat.com, pbonzini@redhat.com, stefanha@redhat.com,
	qemu-block@nongnu.org
Subject: [Qemu-devel] [PATCH v2 03/11] block: Mark fd handlers as "protocol"
Date: Wed, 29 Jul 2015 12:42:06 +0800
Message-ID: <1438144934-23619-4-git-send-email-famz@redhat.com>
In-Reply-To: <1438144934-23619-1-git-send-email-famz@redhat.com>

The "protocol" type covers all the fd handlers and event notifiers used by the
block layer, in particular those that must still be polled in a nested event
loop.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
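A minimal usage sketch of the new type, not part of the patch itself: a
protocol driver tags its socket with AIO_CLIENT_PROTOCOL (as every hunk below
does), so that a nested event loop can mask out all other clients and make
progress on block I/O alone. aio_disable_clients() and aio_enable_clients()
are only introduced later in this series (patch 07), so their exact
signatures are assumed here; ctx, sockfd, read_cb, write_cb, opaque and done
are placeholders.

    /* Register the fd as a "protocol" client, as the hunks below do. */
    aio_set_fd_handler(ctx, sockfd, AIO_CLIENT_PROTOCOL,
                       read_cb, write_cb, opaque);

    /* Nested event loop: temporarily mask every client except "protocol"
     * handlers, then poll until the request completes.  The
     * aio_{disable,enable}_clients signatures are assumptions based on
     * patch 07 of this series. */
    aio_disable_clients(ctx, AIO_CLIENT_MASK_ALL & ~AIO_CLIENT_PROTOCOL);
    while (!done) {
        aio_poll(ctx, true);
    }
    aio_enable_clients(ctx, AIO_CLIENT_MASK_ALL & ~AIO_CLIENT_PROTOCOL);
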
 block/curl.c        |  8 ++++----
 block/iscsi.c       |  4 ++--
 block/linux-aio.c   |  4 ++--
 block/nbd-client.c  |  8 ++++----
 block/nfs.c         |  6 +++---
 block/sheepdog.c    | 22 +++++++++++-----------
 block/ssh.c         |  4 ++--
 block/win32-aio.c   |  4 ++--
 include/block/aio.h |  1 +
 9 files changed, 31 insertions(+), 30 deletions(-)

diff --git a/block/curl.c b/block/curl.c
index 6925672..75d237c 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -154,19 +154,19 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
     DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd);
     switch (action) {
         case CURL_POLL_IN:
-            aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_UNSPECIFIED,
+            aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_PROTOCOL,
                                curl_multi_read, NULL, state);
             break;
         case CURL_POLL_OUT:
-            aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_UNSPECIFIED,
+            aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_PROTOCOL,
                                NULL, curl_multi_do, state);
             break;
         case CURL_POLL_INOUT:
-            aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_UNSPECIFIED,
+            aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_PROTOCOL,
                                curl_multi_read, curl_multi_do, state);
             break;
         case CURL_POLL_REMOVE:
-            aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_UNSPECIFIED,
+            aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_PROTOCOL,
                                NULL, NULL, NULL);
             break;
     }
diff --git a/block/iscsi.c b/block/iscsi.c
index 0ee1295..1713625 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -292,7 +292,7 @@ iscsi_set_events(IscsiLun *iscsilun)
 
     if (ev != iscsilun->events) {
         aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsi),
-                           AIO_CLIENT_UNSPECIFIED,
+                           AIO_CLIENT_PROTOCOL,
                            (ev & POLLIN) ? iscsi_process_read : NULL,
                            (ev & POLLOUT) ? iscsi_process_write : NULL,
                            iscsilun);
@@ -1277,7 +1277,7 @@ static void iscsi_detach_aio_context(BlockDriverState *bs)
     IscsiLun *iscsilun = bs->opaque;
 
     aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),
-                       AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+                       AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
     iscsilun->events = 0;
 
     if (iscsilun->nop_timer) {
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 0921bde..1491684 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -287,7 +287,7 @@ void laio_detach_aio_context(void *s_, AioContext *old_context)
 {
     struct qemu_laio_state *s = s_;
 
-    aio_set_event_notifier(old_context, &s->e, AIO_CLIENT_UNSPECIFIED, NULL);
+    aio_set_event_notifier(old_context, &s->e, AIO_CLIENT_PROTOCOL, NULL);
     qemu_bh_delete(s->completion_bh);
 }
 
@@ -296,7 +296,7 @@ void laio_attach_aio_context(void *s_, AioContext *new_context)
     struct qemu_laio_state *s = s_;
 
     s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
-    aio_set_event_notifier(new_context, &s->e, AIO_CLIENT_UNSPECIFIED,
+    aio_set_event_notifier(new_context, &s->e, AIO_CLIENT_PROTOCOL,
                            qemu_laio_completion_cb);
 }
 
diff --git a/block/nbd-client.c b/block/nbd-client.c
index 36c46c5..edf2199 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -124,7 +124,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
     s->send_coroutine = qemu_coroutine_self();
     aio_context = bdrv_get_aio_context(bs);
 
-    aio_set_fd_handler(aio_context, s->sock, AIO_CLIENT_UNSPECIFIED,
+    aio_set_fd_handler(aio_context, s->sock, AIO_CLIENT_PROTOCOL,
                        nbd_reply_ready, nbd_restart_write, bs);
     if (qiov) {
         if (!s->is_unix) {
@@ -144,7 +144,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
     } else {
         rc = nbd_send_request(s->sock, request);
     }
-    aio_set_fd_handler(aio_context, s->sock, AIO_CLIENT_UNSPECIFIED,
+    aio_set_fd_handler(aio_context, s->sock, AIO_CLIENT_PROTOCOL,
                        nbd_reply_ready, NULL, bs);
     s->send_coroutine = NULL;
     qemu_co_mutex_unlock(&s->send_mutex);
@@ -350,14 +350,14 @@ void nbd_client_detach_aio_context(BlockDriverState *bs)
 {
     aio_set_fd_handler(bdrv_get_aio_context(bs),
                        nbd_get_client_session(bs)->sock,
-                       AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+                       AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
 }
 
 void nbd_client_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
 {
     aio_set_fd_handler(new_context, nbd_get_client_session(bs)->sock,
-                       AIO_CLIENT_UNSPECIFIED, nbd_reply_ready, NULL, bs);
+                       AIO_CLIENT_PROTOCOL, nbd_reply_ready, NULL, bs);
 }
 
 void nbd_client_close(BlockDriverState *bs)
diff --git a/block/nfs.c b/block/nfs.c
index a21dd6f..4d12067 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -63,7 +63,7 @@ static void nfs_set_events(NFSClient *client)
     int ev = nfs_which_events(client->context);
     if (ev != client->events) {
         aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
-                           AIO_CLIENT_UNSPECIFIED,
+                           AIO_CLIENT_PROTOCOL,
                            (ev & POLLIN) ? nfs_process_read : NULL,
                            (ev & POLLOUT) ? nfs_process_write : NULL, client);
 
@@ -241,7 +241,7 @@ static void nfs_detach_aio_context(BlockDriverState *bs)
     NFSClient *client = bs->opaque;
 
     aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
-                       AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+                       AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
     client->events = 0;
 }
 
@@ -261,7 +261,7 @@ static void nfs_client_close(NFSClient *client)
             nfs_close(client->context, client->fh);
         }
         aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
-                           AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+                           AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
         nfs_destroy_context(client->context);
     }
     memset(client, 0, sizeof(NFSClient));
diff --git a/block/sheepdog.c b/block/sheepdog.c
index e0552b7..de9f8be 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -624,7 +624,7 @@ static coroutine_fn void do_co_req(void *opaque)
     unsigned int *rlen = srco->rlen;
 
     co = qemu_coroutine_self();
-    aio_set_fd_handler(srco->aio_context, sockfd, AIO_CLIENT_UNSPECIFIED,
+    aio_set_fd_handler(srco->aio_context, sockfd, AIO_CLIENT_PROTOCOL,
                        NULL, restart_co_req, co);
 
     ret = send_co_req(sockfd, hdr, data, wlen);
@@ -632,7 +632,7 @@ static coroutine_fn void do_co_req(void *opaque)
         goto out;
     }
 
-    aio_set_fd_handler(srco->aio_context, sockfd, AIO_CLIENT_UNSPECIFIED,
+    aio_set_fd_handler(srco->aio_context, sockfd, AIO_CLIENT_PROTOCOL,
                        restart_co_req, NULL, co);
 
     ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
@@ -658,7 +658,7 @@ static coroutine_fn void do_co_req(void *opaque)
 out:
     /* there is at most one request for this sockfd, so it is safe to
      * set each handler to NULL. */
-    aio_set_fd_handler(srco->aio_context, sockfd, AIO_CLIENT_UNSPECIFIED,
+    aio_set_fd_handler(srco->aio_context, sockfd, AIO_CLIENT_PROTOCOL,
                        NULL, NULL, NULL);
 
     srco->ret = ret;
@@ -743,7 +743,7 @@ static coroutine_fn void reconnect_to_sdog(void *opaque)
     BDRVSheepdogState *s = opaque;
     AIOReq *aio_req, *next;
 
-    aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_UNSPECIFIED, NULL,
+    aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_PROTOCOL, NULL,
                        NULL, NULL);
     close(s->fd);
     s->fd = -1;
@@ -957,7 +957,7 @@ static int get_sheep_fd(BDRVSheepdogState *s, Error **errp)
         return fd;
     }
 
-    aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_UNSPECIFIED,
+    aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_PROTOCOL,
                        co_read_response, NULL, s);
     return fd;
 }
@@ -1213,7 +1213,7 @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
 
     qemu_co_mutex_lock(&s->lock);
     s->co_send = qemu_coroutine_self();
-    aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_UNSPECIFIED,
+    aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_PROTOCOL,
                        co_read_response, co_write_request, s);
     socket_set_cork(s->fd, 1);
 
@@ -1232,7 +1232,7 @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
     }
 out:
     socket_set_cork(s->fd, 0);
-    aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_UNSPECIFIED,
+    aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_PROTOCOL,
                        co_read_response, NULL, s);
     s->co_send = NULL;
     qemu_co_mutex_unlock(&s->lock);
@@ -1411,7 +1411,7 @@ static void sd_detach_aio_context(BlockDriverState *bs)
 {
     BDRVSheepdogState *s = bs->opaque;
 
-    aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_UNSPECIFIED, NULL,
+    aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_PROTOCOL, NULL,
                        NULL, NULL);
 }
 
@@ -1421,7 +1421,7 @@ static void sd_attach_aio_context(BlockDriverState *bs,
     BDRVSheepdogState *s = bs->opaque;
 
     s->aio_context = new_context;
-    aio_set_fd_handler(new_context, s->fd, AIO_CLIENT_UNSPECIFIED,
+    aio_set_fd_handler(new_context, s->fd, AIO_CLIENT_PROTOCOL,
                        co_read_response, NULL, s);
 }
 
@@ -1537,7 +1537,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags,
     return 0;
 out:
     aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
-                       AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+                       AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
     if (s->fd >= 0) {
         closesocket(s->fd);
     }
@@ -1922,7 +1922,7 @@ static void sd_close(BlockDriverState *bs)
     }
 
     aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
-                       AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+                       AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
     closesocket(s->fd);
     g_free(s->host_spec);
 }
diff --git a/block/ssh.c b/block/ssh.c
index 71d7ffe..3e721d2 100644
--- a/block/ssh.c
+++ b/block/ssh.c
@@ -803,7 +803,7 @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs)
             rd_handler, wr_handler);
 
     aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
-                       AIO_CLIENT_UNSPECIFIED, rd_handler, wr_handler, co);
+                       AIO_CLIENT_PROTOCOL, rd_handler, wr_handler, co);
 }
 
 static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
@@ -811,7 +811,7 @@ static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
 {
     DPRINTF("s->sock=%d", s->sock);
     aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
-                       AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+                       AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
 }
 
 /* A non-blocking call returned EAGAIN, so yield, ensuring the
diff --git a/block/win32-aio.c b/block/win32-aio.c
index 0081886..57b1916 100644
--- a/block/win32-aio.c
+++ b/block/win32-aio.c
@@ -174,7 +174,7 @@ int win32_aio_attach(QEMUWin32AIOState *aio, HANDLE hfile)
 void win32_aio_detach_aio_context(QEMUWin32AIOState *aio,
                                   AioContext *old_context)
 {
-    aio_set_event_notifier(old_context, &aio->e, AIO_CLIENT_UNSPECIFIED, NULL);
+    aio_set_event_notifier(old_context, &aio->e, AIO_CLIENT_PROTOCOL, NULL);
     aio->is_aio_context_attached = false;
 }
 
@@ -182,7 +182,7 @@ void win32_aio_attach_aio_context(QEMUWin32AIOState *aio,
                                   AioContext *new_context)
 {
     aio->is_aio_context_attached = true;
-    aio_set_event_notifier(new_context, &aio->e, AIO_CLIENT_UNSPECIFIED,
+    aio_set_event_notifier(new_context, &aio->e, AIO_CLIENT_PROTOCOL,
                            win32_aio_completion_cb);
 }
 
diff --git a/include/block/aio.h b/include/block/aio.h
index bd1d44b..d02ddfa 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -273,6 +273,7 @@ bool aio_pending(AioContext *ctx);
 bool aio_dispatch(AioContext *ctx);
 
 #define AIO_CLIENT_UNSPECIFIED    (1 << 0)
+#define AIO_CLIENT_PROTOCOL       (1 << 1)
 #define AIO_CLIENT_MASK_ALL       -1
 
 /* Progress in completing AIO work to occur.  This can issue new pending
-- 
2.4.3

Thread overview: 54+ messages
2015-07-29  4:42 [Qemu-devel] [PATCH v2 00/11] aio: Introduce handler type to fix nested aio_poll for dataplane Fam Zheng
2015-07-29  4:42 ` [Qemu-devel] [PATCH v2 01/11] aio: Introduce "type" in aio_set_fd_handler and aio_set_event_notifier Fam Zheng
2015-08-27 13:50   ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2015-07-29  4:42 ` [Qemu-devel] [PATCH v2 02/11] aio: Save type to AioHandler Fam Zheng
2015-08-27 13:50   ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2015-07-29  4:42 ` Fam Zheng [this message]
2015-08-27 13:53   ` [Qemu-devel] [Qemu-block] [PATCH v2 03/11] block: Mark fd handlers as "protocol" Stefan Hajnoczi
2015-09-07  4:43     ` Fam Zheng
2015-07-29  4:42 ` [Qemu-devel] [PATCH v2 04/11] nbd: Mark fd handlers client type as "nbd server" Fam Zheng
2015-08-27 14:02   ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2015-07-29  4:42 ` [Qemu-devel] [PATCH v2 05/11] aio: Mark ctx->notifier's client type as "context" Fam Zheng
2015-08-27 17:12   ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2015-07-29  4:42 ` [Qemu-devel] [PATCH v2 06/11] dataplane: Mark host notifiers' client type as "dataplane" Fam Zheng
2015-08-27 17:14   ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2015-07-29  4:42 ` [Qemu-devel] [PATCH v2 07/11] aio-posix: introduce aio_{disable, enable}_clients Fam Zheng
2015-08-27 17:23   ` Stefan Hajnoczi
2015-09-07  5:26     ` Fam Zheng
2015-07-29  4:42 ` [Qemu-devel] [PATCH v2 08/11] aio-win32: Implement " Fam Zheng
2015-07-29  4:42 ` [Qemu-devel] [PATCH v2 09/11] block: Introduce bdrv_aio_poll Fam Zheng
2015-08-27 17:25   ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2015-08-28 11:50   ` Stefan Hajnoczi
2015-07-29  4:42 ` [Qemu-devel] [PATCH v2 10/11] block: Replace nested aio_poll with bdrv_aio_poll Fam Zheng
2015-08-28 11:50   ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2015-07-29  4:42 ` [Qemu-devel] [PATCH v2 11/11] block: Only poll block layer fds in bdrv_aio_poll Fam Zheng
2015-07-29  7:36   ` Paolo Bonzini
2015-07-29  7:37   ` Paolo Bonzini
2015-07-29 10:57     ` Fam Zheng
2015-07-29 11:02       ` Paolo Bonzini
2015-07-29 11:53         ` Fam Zheng
2015-07-29 12:03           ` Paolo Bonzini
2015-07-30  1:35             ` Fam Zheng
2015-07-30 13:22               ` Paolo Bonzini
2015-09-09  3:22             ` Fam Zheng
2015-09-11  8:15               ` Paolo Bonzini
2015-09-11  9:14                 ` Fam Zheng
2015-09-11  9:36                   ` [Qemu-devel] [Qemu-block] " Alberto Garcia
2015-09-11  9:43                     ` Daniel P. Berrange
2015-09-11  9:44                     ` Fam Zheng
2015-09-11  9:54                       ` Paolo Bonzini
2015-09-11 10:40                         ` Fam Zheng
2015-09-11 10:46                           ` Paolo Bonzini
2015-09-11 11:01                             ` Fam Zheng
2015-09-11 11:02                               ` Paolo Bonzini
2015-09-11 11:12                                 ` Fam Zheng
2015-09-11  9:45                     ` Paolo Bonzini
2015-07-29  7:38 ` [Qemu-devel] [PATCH v2 00/11] aio: Introduce handler type to fix nested aio_poll for dataplane Paolo Bonzini
2015-08-28 11:53 ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2015-09-07  6:28   ` Fam Zheng
2015-09-11 10:39 ` [Qemu-devel] " Kevin Wolf
2015-09-11 11:46   ` Fam Zheng
2015-09-11 12:22     ` Kevin Wolf
2015-09-14  7:27       ` Fam Zheng
2015-09-14  8:40         ` Kevin Wolf
2015-09-28  9:31       ` Stefan Hajnoczi
