From: Fam Zheng <famz@redhat.com>
To: qemu-devel@nongnu.org
Cc: Jeff Cody <jcody@redhat.com>, Kevin Wolf <kwolf@redhat.com>,
Max Reitz <mreitz@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>,
qemu-block@nongnu.org
Subject: [Qemu-devel] [PATCH 2/4] virtio: Always use aio path to set host handler
Date: Fri, 24 Jun 2016 13:12:25 +0800 [thread overview]
Message-ID: <20160624051227.8417-3-famz@redhat.com> (raw)
In-Reply-To: <20160624051227.8417-1-famz@redhat.com>
Apart from the interface difference, the aio version works the same as
the non-aio one. The event notifier versus aio fd handler makes no
difference, except the former led to an ugly patch in commit
ab27c3b5e7, which won't be necessary any more.
As the first step to unify them, all callers are switched to this
renamed aio interface, and a function comment is added.
Signed-off-by: Fam Zheng <famz@redhat.com>
---
hw/block/dataplane/virtio-blk.c | 6 +++---
hw/scsi/virtio-scsi-dataplane.c | 9 +++++----
hw/virtio/virtio-bus.c | 13 +++++++++----
hw/virtio/virtio.c | 12 +++++++++---
include/hw/virtio/virtio.h | 6 +++---
5 files changed, 29 insertions(+), 17 deletions(-)
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 2041b04..61d65bb 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -174,8 +174,8 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
/* Get this show started by hooking up our callbacks */
aio_context_acquire(s->ctx);
- virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx,
- virtio_blk_data_plane_handle_output);
+ virtio_queue_set_host_notifier_handler(s->vq, s->ctx, true,
+ virtio_blk_data_plane_handle_output);
aio_context_release(s->ctx);
return;
@@ -210,7 +210,7 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
aio_context_acquire(s->ctx);
/* Stop notifications for new requests from guest */
- virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, NULL);
+ virtio_queue_set_host_notifier_handler(s->vq, s->ctx, false, NULL);
/* Drain and switch bs back to the QEMU main loop */
blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 18ced31..ffabb87 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -80,7 +80,7 @@ static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,
return rc;
}
- virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, fn);
+ virtio_queue_set_host_notifier_handler(vq, s->ctx, true, fn);
return 0;
}
@@ -97,10 +97,11 @@ static void virtio_scsi_clear_aio(VirtIOSCSI *s)
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
int i;
- virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx, NULL);
- virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx, NULL);
+ virtio_queue_set_host_notifier_handler(vs->ctrl_vq, s->ctx, false, NULL);
+ virtio_queue_set_host_notifier_handler(vs->event_vq, s->ctx, false, NULL);
for (i = 0; i < vs->conf.num_queues; i++) {
- virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, NULL);
+ virtio_queue_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, false,
+ NULL);
}
}
diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c
index f34b4fc..0f81096 100644
--- a/hw/virtio/virtio-bus.c
+++ b/hw/virtio/virtio-bus.c
@@ -166,16 +166,20 @@ static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus,
error_report("%s: unable to init event notifier: %d", __func__, r);
return r;
}
- virtio_queue_set_host_notifier_fd_handler(vq, true, true);
+ virtio_queue_set_host_notifier_handler(vq, qemu_get_aio_context(),
+ true, NULL);
+
r = k->ioeventfd_assign(proxy, notifier, n, assign);
if (r < 0) {
error_report("%s: unable to assign ioeventfd: %d", __func__, r);
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+ virtio_queue_set_host_notifier_handler(vq, qemu_get_aio_context(),
+ false, NULL);
event_notifier_cleanup(notifier);
return r;
}
} else {
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+ virtio_queue_set_host_notifier_handler(vq, qemu_get_aio_context(),
+ false, NULL);
k->ioeventfd_assign(proxy, notifier, n, assign);
event_notifier_cleanup(notifier);
}
@@ -269,7 +273,8 @@ int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign)
* ioeventfd and we may end up with a notification where
* we don't expect one.
*/
- virtio_queue_set_host_notifier_fd_handler(vq, assign, !assign);
+ virtio_queue_set_host_notifier_handler(vq, qemu_get_aio_context(),
+ false, NULL);
if (!assign) {
/* Use generic ioeventfd handler again. */
k->ioeventfd_set_disabled(proxy, false);
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index e1e93c7..99cd0c0 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -1793,10 +1793,16 @@ static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
}
}
-void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
- VirtQueueHandleOutput handle_output)
+/* If assign == true, set the host notifier handler to @handle_output, or use
+ * the default vq handler if it is NULL, in the aio context @ctx.
+ * If assign == false, unregister the handler of host notifier in @ctx, and do
+ * a last host notify if there are notifications pending. */
+void virtio_queue_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
+ bool assign,
+ VirtQueueHandleOutput handle_output)
{
- if (handle_output) {
+ if (assign) {
+ handle_output = handle_output ?: vq->handle_output;
vq->handle_aio_output = handle_output;
aio_set_event_notifier(ctx, &vq->host_notifier, true,
virtio_queue_host_notifier_aio_read);
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index faec22a..9a40df7 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -250,9 +250,9 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
bool set_handler);
-void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
- void (*fn)(VirtIODevice *,
- VirtQueue *));
+void virtio_queue_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
+ bool assign,
+ VirtQueueHandleOutput handle_output);
void virtio_irq(VirtQueue *vq);
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
VirtQueue *virtio_vector_next_queue(VirtQueue *vq);
--
2.8.3
next prev parent reply other threads:[~2016-06-24 5:12 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-06-24 5:12 [Qemu-devel] [PATCH 0/4] virtio: Merge two host notifier handling paths Fam Zheng
2016-06-24 5:12 ` [Qemu-devel] [PATCH 1/4] virtio: Add typedef for handle_output Fam Zheng
2016-06-24 5:12 ` Fam Zheng [this message]
2016-06-24 6:39 ` [Qemu-devel] [PATCH 2/4] virtio: Always use aio path to set host handler Paolo Bonzini
2016-06-24 7:25 ` Fam Zheng
2016-06-24 5:12 ` [Qemu-devel] [PATCH 3/4] virtio: Drop the unused virtio_queue_set_host_notifier_fd_handler code Fam Zheng
2016-06-24 5:12 ` [Qemu-devel] [PATCH 4/4] Revert "mirror: Workaround for unexpected iohandler events during completion" Fam Zheng
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20160624051227.8417-3-famz@redhat.com \
--to=famz@redhat.com \
--cc=jcody@redhat.com \
--cc=kwolf@redhat.com \
--cc=mreitz@redhat.com \
--cc=mst@redhat.com \
--cc=pbonzini@redhat.com \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=stefanha@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).