qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Fam Zheng <famz@redhat.com>
To: qemu-devel@nongnu.org
Cc: kwolf@redhat.com, pbonzini@redhat.com, stefanha@redhat.com,
	qemu-block@nongnu.org
Subject: [Qemu-devel] [RFC PATCH 08/11] dataplane: Mark host notifiers' client type as "dataplane"
Date: Thu, 23 Jul 2015 14:32:15 +0800	[thread overview]
Message-ID: <1437633138-29188-9-git-send-email-famz@redhat.com> (raw)
In-Reply-To: <1437633138-29188-1-git-send-email-famz@redhat.com>

Dataplane host notifiers are currently registered with the catch-all
AIO_CLIENT_UNSPECIFIED client type. Introduce a dedicated
AIO_CLIENT_DATAPLANE type and use it when registering (and
unregistering) the virtio-blk and virtio-scsi dataplane host notifiers,
so that nested aio_poll() callers can include or exclude dataplane
handlers by client type.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 hw/block/dataplane/virtio-blk.c |  4 ++--
 hw/scsi/virtio-scsi-dataplane.c | 16 ++++++++--------
 include/block/aio.h             |  1 +
 3 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index e472154..5419f1c 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -283,7 +283,7 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
 
     /* Get this show started by hooking up our callbacks */
     aio_context_acquire(s->ctx);
-    aio_set_event_notifier(s->ctx, &s->host_notifier, AIO_CLIENT_UNSPECIFIED,
+    aio_set_event_notifier(s->ctx, &s->host_notifier, AIO_CLIENT_DATAPLANE,
                            handle_notify);
     aio_context_release(s->ctx);
     return;
@@ -320,7 +320,7 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
     aio_context_acquire(s->ctx);
 
     /* Stop notifications for new requests from guest */
-    aio_set_event_notifier(s->ctx, &s->host_notifier, AIO_CLIENT_UNSPECIFIED,
+    aio_set_event_notifier(s->ctx, &s->host_notifier, AIO_CLIENT_DATAPLANE,
                            NULL);
 
     /* Drain and switch bs back to the QEMU main loop */
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index f7bab09..55c2524 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -60,7 +60,7 @@ static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s,
     r = g_slice_new(VirtIOSCSIVring);
     r->host_notifier = *virtio_queue_get_host_notifier(vq);
     r->guest_notifier = *virtio_queue_get_guest_notifier(vq);
-    aio_set_event_notifier(s->ctx, &r->host_notifier, AIO_CLIENT_UNSPECIFIED,
+    aio_set_event_notifier(s->ctx, &r->host_notifier, AIO_CLIENT_DATAPLANE,
                            handler);
 
     r->parent = s;
@@ -72,7 +72,7 @@ static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s,
     return r;
 
 fail_vring:
-    aio_set_event_notifier(s->ctx, &r->host_notifier, AIO_CLIENT_UNSPECIFIED,
+    aio_set_event_notifier(s->ctx, &r->host_notifier, AIO_CLIENT_DATAPLANE,
                            NULL);
     k->set_host_notifier(qbus->parent, n, false);
     g_slice_free(VirtIOSCSIVring, r);
@@ -165,16 +165,16 @@ static void virtio_scsi_clear_aio(VirtIOSCSI *s)
 
     if (s->ctrl_vring) {
         aio_set_event_notifier(s->ctx, &s->ctrl_vring->host_notifier,
-                               AIO_CLIENT_UNSPECIFIED, NULL);
+                               AIO_CLIENT_DATAPLANE, NULL);
     }
     if (s->event_vring) {
         aio_set_event_notifier(s->ctx, &s->event_vring->host_notifier,
-                               AIO_CLIENT_UNSPECIFIED, NULL);
+                               AIO_CLIENT_DATAPLANE, NULL);
     }
     if (s->cmd_vrings) {
         for (i = 0; i < vs->conf.num_queues && s->cmd_vrings[i]; i++) {
             aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier,
-                                   AIO_CLIENT_UNSPECIFIED, NULL);
+                                   AIO_CLIENT_DATAPLANE, NULL);
         }
     }
 }
@@ -296,12 +296,12 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
     aio_context_acquire(s->ctx);
 
     aio_set_event_notifier(s->ctx, &s->ctrl_vring->host_notifier,
-                           AIO_CLIENT_UNSPECIFIED, NULL);
+                           AIO_CLIENT_DATAPLANE, NULL);
     aio_set_event_notifier(s->ctx, &s->event_vring->host_notifier,
-                           AIO_CLIENT_UNSPECIFIED, NULL);
+                           AIO_CLIENT_DATAPLANE, NULL);
     for (i = 0; i < vs->conf.num_queues; i++) {
         aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier,
-                               AIO_CLIENT_UNSPECIFIED, NULL);
+                               AIO_CLIENT_DATAPLANE, NULL);
     }
 
     blk_drain_all(); /* ensure there are no in-flight requests */
diff --git a/include/block/aio.h b/include/block/aio.h
index 4b53151..cd9a210 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -276,6 +276,7 @@ bool aio_dispatch(AioContext *ctx);
 #define AIO_CLIENT_PROTOCOL       (1 << 1)
 #define AIO_CLIENT_NBD_SERVER     (1 << 2)
 #define AIO_CLIENT_CONTEXT        (1 << 3)
+#define AIO_CLIENT_DATAPLANE      (1 << 4)
 #define AIO_CLIENT_MASK_ALL       -1
 
 /* Progress in completing AIO work to occur.  This can issue new pending
-- 
2.4.3

  parent reply	other threads:[~2015-07-23  6:33 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-07-23  6:32 [Qemu-devel] [RFC PATCH 00/11] aio: Introduce handler type to fix nested aio_poll for dataplane Fam Zheng
2015-07-23  6:32 ` [Qemu-devel] [RFC PATCH 01/11] aio: Introduce "type" in aio_set_fd_handler and aio_set_event_notifier Fam Zheng
2015-07-23  6:32 ` [Qemu-devel] [RFC PATCH 02/11] aio: Save type to AioHandler Fam Zheng
2015-07-23  6:32 ` [Qemu-devel] [RFC PATCH 03/11] aio-posix: Introduce aio_poll_clients Fam Zheng
2015-07-23  6:32 ` [Qemu-devel] [RFC PATCH 04/11] aio-win32: Implement aio_poll_clients Fam Zheng
2015-07-23  6:32 ` [Qemu-devel] [RFC PATCH 05/11] block: Mark fd handlers as "protocol" Fam Zheng
2015-07-23  6:32 ` [Qemu-devel] [RFC PATCH 06/11] nbd: Mark fd handlers client type as "nbd server" Fam Zheng
2015-07-23  6:32 ` [Qemu-devel] [RFC PATCH 07/11] aio: Mark ctx->notifier's client type as "context" Fam Zheng
2015-07-23  6:32 ` Fam Zheng [this message]
2015-07-23  6:32 ` [Qemu-devel] [RFC PATCH 09/11] block: Introduce bdrv_aio_poll Fam Zheng
2015-07-23  6:32 ` [Qemu-devel] [RFC PATCH 10/11] block: Replace nested aio_poll with bdrv_aio_poll Fam Zheng
2015-07-23  6:32 ` [Qemu-devel] [RFC PATCH 11/11] block: Only poll block layer fds in bdrv_aio_poll Fam Zheng
2015-07-23  8:15 ` [Qemu-devel] [RFC PATCH 00/11] aio: Introduce handler type to fix nested aio_poll for dataplane Paolo Bonzini
2015-07-23 11:43   ` Fam Zheng
2015-07-24  7:35     ` Paolo Bonzini
2015-07-27  6:55       ` Fam Zheng
2015-07-27 13:23         ` Paolo Bonzini

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1437633138-29188-9-git-send-email-famz@redhat.com \
    --to=famz@redhat.com \
    --cc=kwolf@redhat.com \
    --cc=pbonzini@redhat.com \
    --cc=qemu-block@nongnu.org \
    --cc=qemu-devel@nongnu.org \
    --cc=stefanha@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).