From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: Cornelia Huck <cornelia.huck@de.ibm.com>, pbonzini@redhat.com
Subject: [Qemu-devel] [PATCH 1/2] virtio: add aio handler
Date: Tue, 29 Mar 2016 16:42:11 +0300
Message-ID: <1459258923-10319-2-git-send-email-mst@redhat.com>
In-Reply-To: <1459258923-10319-1-git-send-email-mst@redhat.com>

In addition to handling I/O in the vcpu thread and in the I/O thread,
blk dataplane introduces yet another mode: handling it via aio.

This reuses the same handler as the previous modes, which triggers
races, as those handlers were not designed to be reentrant.

As a temporary fix, add a separate handler just for aio; this makes it
possible to disable the regular handlers when dataplane is active.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
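
Usage note (not part of this patch): a minimal sketch of how a
dataplane implementation might consume the new API. The "my_"-prefixed
names below are illustrative assumptions, not code from this series.

#include "hw/virtio/virtio.h"

/* Runs in the iothread's AioContext when the guest kicks the queue. */
static void my_dataplane_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    /* pop and process requests here */
}

static void my_dataplane_start(VirtQueue *vq, AioContext *ctx)
{
    /* Install the aio-only handler... */
    virtio_set_queue_aio(vq, my_dataplane_handle_output);
    /* ...then attach the host notifier to the iothread's context, so
     * notifications invoke the aio handler, not the regular one. */
    virtio_queue_aio_set_host_notifier_handler(vq, ctx, true, true);
}
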
 include/hw/virtio/virtio.h |  4 ++++
 hw/virtio/virtio.c         | 36 ++++++++++++++++++++++++++++++++----
 2 files changed, 36 insertions(+), 4 deletions(-)

diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 2b5b248..c032067 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -142,6 +142,9 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                             void (*handle_output)(VirtIODevice *,
                                                   VirtQueue *));
 
+void virtio_set_queue_aio(VirtQueue *vq,
+                          void (*handle_output)(VirtIODevice *, VirtQueue *));
+
 void virtio_del_queue(VirtIODevice *vdev, int n);
 
 void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num);
@@ -253,6 +256,7 @@ void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                 bool assign, bool set_handler);
 void virtio_queue_notify_vq(VirtQueue *vq);
+void virtio_queue_notify_aio_vq(VirtQueue *vq);
 void virtio_irq(VirtQueue *vq);
 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
 VirtQueue *virtio_vector_next_queue(VirtQueue *vq);
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 08275a9..182bc56 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -94,6 +94,7 @@ struct VirtQueue
 
     uint16_t vector;
     void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
+    void (*handle_aio_output)(VirtIODevice *vdev, VirtQueue *vq);
     VirtIODevice *vdev;
     EventNotifier guest_notifier;
     EventNotifier host_notifier;
@@ -1086,6 +1087,16 @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
     virtio_queue_update_rings(vdev, n);
 }
 
+void virtio_queue_notify_aio_vq(VirtQueue *vq)
+{
+    if (vq->vring.desc && vq->handle_aio_output) {
+        VirtIODevice *vdev = vq->vdev;
+
+        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
+        vq->handle_aio_output(vdev, vq);
+    }
+}
+
 void virtio_queue_notify_vq(VirtQueue *vq)
 {
     if (vq->vring.desc && vq->handle_output) {
@@ -1141,10 +1152,19 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
     vdev->vq[i].vring.num_default = queue_size;
     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
     vdev->vq[i].handle_output = handle_output;
+    vdev->vq[i].handle_aio_output = NULL;
 
     return &vdev->vq[i];
 }
 
+void virtio_set_queue_aio(VirtQueue *vq,
+                          void (*handle_output)(VirtIODevice *, VirtQueue *))
+{
+    assert(vq->handle_output);
+
+    vq->handle_aio_output = handle_output;
+}
+
 void virtio_del_queue(VirtIODevice *vdev, int n)
 {
     if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
@@ -1778,11 +1798,11 @@ EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
     return &vq->guest_notifier;
 }
 
-static void virtio_queue_host_notifier_read(EventNotifier *n)
+static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
 {
     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
     if (event_notifier_test_and_clear(n)) {
-        virtio_queue_notify_vq(vq);
+        virtio_queue_notify_aio_vq(vq);
     }
 }
 
@@ -1791,14 +1811,22 @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
 {
     if (assign && set_handler) {
         aio_set_event_notifier(ctx, &vq->host_notifier, true,
-                               virtio_queue_host_notifier_read);
+                               virtio_queue_host_notifier_aio_read);
     } else {
         aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
     }
     if (!assign) {
         /* Test and clear notifier before after disabling event,
          * in case poll callback didn't have time to run. */
-        virtio_queue_host_notifier_read(&vq->host_notifier);
+        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
+    }
+}
+
+static void virtio_queue_host_notifier_read(EventNotifier *n)
+{
+    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+    if (event_notifier_test_and_clear(n)) {
+        virtio_queue_notify_vq(vq);
     }
 }
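
Teardown side of the same sketch from above (again with assumed "my_"
names, not part of this patch): stopping detaches the notifier from
the iothread's AioContext, and the assign=false call re-runs the aio
read handler once so a notification that raced with removal is not
lost.

static void my_dataplane_stop(VirtQueue *vq, AioContext *ctx)
{
    /* Detach from the iothread; per the code above, assign=false also
     * test-and-clears the host notifier in case a final event arrived
     * during removal. */
    virtio_queue_aio_set_host_notifier_handler(vq, ctx, false, false);
    /* Clear the aio handler; virtio_queue_notify_aio_vq() then becomes
     * a no-op and only the regular handler path remains. */
    virtio_set_queue_aio(vq, NULL);
}
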
--
MST