From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Fam Zheng" <fam@euphon.net>,
"Stefan Hajnoczi" <stefanha@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Stefano Stabellini" <sstabellini@kernel.org>,
qemu-block@nongnu.org, "Juan Quintela" <quintela@redhat.com>,
"Paul Durrant" <paul@xen.org>,
"Anthony Perard" <anthony.perard@citrix.com>,
xen-devel@lists.xenproject.org,
"Philippe Mathieu-Daudé" <philmd@redhat.com>,
"Stefano Garzarella" <sgarzare@redhat.com>,
"Peter Lieven" <pl@kamp.de>, "Stefan Weil" <sw@weilnetz.de>,
"Julia Suvorova" <jusual@redhat.com>,
"Dr. David Alan Gilbert" <dgilbert@redhat.com>,
"Ronnie Sahlberg" <ronniesahlberg@gmail.com>,
"Aarushi Mehta" <mehta.aaru20@gmail.com>,
"Kevin Wolf" <kwolf@redhat.com>,
"Daniel P. Berrangé" <berrange@redhat.com>,
"Richard W.M. Jones" <rjones@redhat.com>,
"Coiby Xu" <Coiby.Xu@gmail.com>,
"Hanna Reitz" <hreitz@redhat.com>,
"Paolo Bonzini" <pbonzini@redhat.com>
Subject: [PATCH v3 6/6] virtio: unify dataplane and non-dataplane ->handle_output()
Date: Tue, 7 Dec 2021 13:23:36 +0000
Message-ID: <20211207132336.36627-7-stefanha@redhat.com>
In-Reply-To: <20211207132336.36627-1-stefanha@redhat.com>

Now that virtio-blk and virtio-scsi are ready, get rid of
the handle_aio_output() callback. It's no longer needed.
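
In other words, callers switch from the single set-handler entry point,
where a NULL handler meant "detach", to an explicit attach/detach pair.
As a rough sketch of the calling convention (vq and ctx stand for a
device's VirtQueue and its AioContext):

    /* before: a NULL handler detaches the host notifier */
    virtio_queue_aio_set_host_notifier_handler(vq, ctx, handle_output);
    virtio_queue_aio_set_host_notifier_handler(vq, ctx, NULL);

    /* after: explicit attach/detach, the vq's own handler is used */
    virtio_queue_aio_attach_host_notifier(vq, ctx);
    virtio_queue_aio_detach_host_notifier(vq, ctx);
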
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/hw/virtio/virtio.h      |  4 +--
 hw/block/dataplane/virtio-blk.c | 16 ++--------
 hw/scsi/virtio-scsi-dataplane.c | 54 ++++-----------------------------
 hw/virtio/virtio.c              | 32 +++++++++----------
 4 files changed, 26 insertions(+), 80 deletions(-)

diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index b90095628f..f095637058 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -316,8 +316,8 @@ bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
 void virtio_queue_host_notifier_read(EventNotifier *n);
-void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
-                                                VirtIOHandleOutput handle_output);
+void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx);
+void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx);
 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
 VirtQueue *virtio_vector_next_queue(VirtQueue *vq);
 
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index a2fa407b98..49276e46f2 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -154,17 +154,6 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
     g_free(s);
 }
 
-static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
-                                                VirtQueue *vq)
-{
-    VirtIOBlock *s = (VirtIOBlock *)vdev;
-
-    assert(s->dataplane);
-    assert(s->dataplane_started);
-
-    virtio_blk_handle_vq(s, vq);
-}
-
 /* Context: QEMU global mutex held */
 int virtio_blk_data_plane_start(VirtIODevice *vdev)
 {
@@ -258,8 +247,7 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
     for (i = 0; i < nvqs; i++) {
         VirtQueue *vq = virtio_get_queue(s->vdev, i);
 
-        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx,
-                                                   virtio_blk_data_plane_handle_output);
+        virtio_queue_aio_attach_host_notifier(vq, s->ctx);
     }
     aio_context_release(s->ctx);
     return 0;
@@ -302,7 +290,7 @@ static void virtio_blk_data_plane_stop_bh(void *opaque)
     for (i = 0; i < s->conf->num_queues; i++) {
         VirtQueue *vq = virtio_get_queue(s->vdev, i);
 
-        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
+        virtio_queue_aio_detach_host_notifier(vq, s->ctx);
     }
 }
 
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 76137de67f..29575cbaf6 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -49,45 +49,6 @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
     }
 }
 
-static void virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
-                                              VirtQueue *vq)
-{
-    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
-
-    virtio_scsi_acquire(s);
-    if (!s->dataplane_fenced) {
-        assert(s->ctx && s->dataplane_started);
-        virtio_scsi_handle_cmd_vq(s, vq);
-    }
-    virtio_scsi_release(s);
-}
-
-static void virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev,
-                                               VirtQueue *vq)
-{
-    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
-
-    virtio_scsi_acquire(s);
-    if (!s->dataplane_fenced) {
-        assert(s->ctx && s->dataplane_started);
-        virtio_scsi_handle_ctrl_vq(s, vq);
-    }
-    virtio_scsi_release(s);
-}
-
-static void virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
-                                                VirtQueue *vq)
-{
-    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
-
-    virtio_scsi_acquire(s);
-    if (!s->dataplane_fenced) {
-        assert(s->ctx && s->dataplane_started);
-        virtio_scsi_handle_event_vq(s, vq);
-    }
-    virtio_scsi_release(s);
-}
-
 static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
@@ -112,10 +73,10 @@ static void virtio_scsi_dataplane_stop_bh(void *opaque)
     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
     int i;
 
-    virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx, NULL);
-    virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx, NULL);
+    virtio_queue_aio_detach_host_notifier(vs->ctrl_vq, s->ctx);
+    virtio_queue_aio_detach_host_notifier(vs->event_vq, s->ctx);
     for (i = 0; i < vs->conf.num_queues; i++) {
-        virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, NULL);
+        virtio_queue_aio_detach_host_notifier(vs->cmd_vqs[i], s->ctx);
     }
 }
 
@@ -176,14 +137,11 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
     memory_region_transaction_commit();
 
     aio_context_acquire(s->ctx);
-    virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx,
-                                               virtio_scsi_data_plane_handle_ctrl);
-    virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx,
-                                               virtio_scsi_data_plane_handle_event);
+    virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
+    virtio_queue_aio_attach_host_notifier(vs->event_vq, s->ctx);
 
     for (i = 0; i < vs->conf.num_queues; i++) {
-        virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx,
-                                                   virtio_scsi_data_plane_handle_cmd);
+        virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
     }
 
     s->dataplane_starting = false;
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index a97a406d3c..ce182a4e57 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -3522,23 +3522,23 @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
     virtio_queue_set_notification(vq, 1);
 }
 
-void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
-                                                VirtIOHandleOutput handle_output)
+void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
 {
-    if (handle_output) {
-        aio_set_event_notifier(ctx, &vq->host_notifier, true,
-                               virtio_queue_host_notifier_read,
-                               virtio_queue_host_notifier_aio_poll,
-                               virtio_queue_host_notifier_aio_poll_ready);
-        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
-                                    virtio_queue_host_notifier_aio_poll_begin,
-                                    virtio_queue_host_notifier_aio_poll_end);
-    } else {
-        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);
-        /* Test and clear notifier before after disabling event,
-         * in case poll callback didn't have time to run. */
-        virtio_queue_host_notifier_read(&vq->host_notifier);
-    }
+    aio_set_event_notifier(ctx, &vq->host_notifier, true,
+                           virtio_queue_host_notifier_read,
+                           virtio_queue_host_notifier_aio_poll,
+                           virtio_queue_host_notifier_aio_poll_ready);
+    aio_set_event_notifier_poll(ctx, &vq->host_notifier,
+                                virtio_queue_host_notifier_aio_poll_begin,
+                                virtio_queue_host_notifier_aio_poll_end);
+}
+
+void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
+{
+    aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);
+    /* Test and clear notifier after disabling event,
+     * in case poll callback didn't have time to run. */
+    virtio_queue_host_notifier_read(&vq->host_notifier);
 }
 
 void virtio_queue_host_notifier_read(EventNotifier *n)
--
2.33.1