From: Fam Zheng <famz@redhat.com>
To: qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
	fam@euphon.net, qemu-block@nongnu.org,
	Jeff Cody <jcody@redhat.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Paolo Bonzini <pbonzini@redhat.com>
Subject: [Qemu-devel] [RFC PATCH 10/12] virtio-scsi-dataplane: Add backend lock listener
Date: Fri, 29 May 2015 18:53:23 +0800	[thread overview]
Message-ID: <1432896805-23867-11-git-send-email-famz@redhat.com> (raw)
In-Reply-To: <1432896805-23867-1-git-send-email-famz@redhat.com>

When a disk is attached to the SCSI bus, virtio_scsi_hotplug takes care
of protecting the block device with op blockers. We haven't enabled
block jobs here yet (unlike what's already done in
virtio_blk_data_plane_create), but it is better to disable ioeventfd
while the backend is locked. This makes sure that guest I/O requests
are paused during QMP transactions (such as multi-disk snapshot or
backup).
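
The hotplug-side wiring looks like this (a minimal sketch mirroring the
virtio-scsi.c hunk below; wire_pause_notifier is a hypothetical helper
name, and blk_add_lock_unlock_notifier is introduced in patch 07/12 of
this series):

    /* Register for backend lock/unlock events at hotplug time. */
    static void wire_pause_notifier(VirtIOSCSI *s, SCSIDevice *sd)
    {
        aio_context_acquire(s->ctx);
        /* Route the disk's I/O through the dataplane AioContext. */
        blk_set_aio_context(sd->conf.blk, s->ctx);
        /* Get called back whenever the backend is locked or unlocked. */
        s->pause_notifier.notify = virtio_scsi_dataplane_pause_handler;
        blk_add_lock_unlock_notifier(sd->conf.blk, &s->pause_notifier);
        aio_context_release(s->ctx);
    }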

A counter is added to the virtio-scsi device to keep track of the disks
whose backends are currently locked. When it goes from 0 to 1, the
ioeventfds are disabled; when it drops back to 0, they are re-enabled.
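
In other words, the lock/unlock notifier boils down to this counter
pattern (a minimal sketch of virtio_scsi_dataplane_pause_handler as
implemented in the hunk below; BdrvLockEvent comes from earlier patches
in this series):

    void virtio_scsi_dataplane_pause_handler(Notifier *notifier, void *data)
    {
        VirtIOSCSI *s = container_of(notifier, VirtIOSCSI, pause_notifier);
        BdrvLockEvent *event = data;

        if (event->locking) {
            /* First locked backend: stop listening on the ioeventfds. */
            if (++s->pause_counter == 1) {
                virtio_scsi_stop_ioeventfd(s);
            }
        } else {
            /* Last backend unlocked: resume listening. */
            if (--s->pause_counter == 0) {
                virtio_scsi_start_ioeventfd(s);
            }
        }
        assert(s->pause_counter >= 0);
    }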

Also, during device initialization, move the enabling of ioeventfds to
just before returning, so that virtio_scsi_clear_aio is no longer
needed there. Rename it, pair it with an enabling variant, fix one
coding style issue, and use the pair at the device pause points.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 hw/scsi/virtio-scsi-dataplane.c | 78 ++++++++++++++++++++++++++++++-----------
 hw/scsi/virtio-scsi.c           |  3 ++
 include/hw/virtio/virtio-scsi.h |  3 ++
 3 files changed, 64 insertions(+), 20 deletions(-)

diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 5575648..28706a2 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -40,7 +40,6 @@ void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
 
 static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s,
                                                VirtQueue *vq,
-                                               EventNotifierHandler *handler,
                                                int n)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
@@ -60,7 +59,6 @@ static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s,
     r = g_slice_new(VirtIOSCSIVring);
     r->host_notifier = *virtio_queue_get_host_notifier(vq);
     r->guest_notifier = *virtio_queue_get_guest_notifier(vq);
-    aio_set_event_notifier(s->ctx, &r->host_notifier, handler);
 
     r->parent = s;
 
@@ -71,7 +69,6 @@ static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s,
     return r;
 
 fail_vring:
-    aio_set_event_notifier(s->ctx, &r->host_notifier, NULL);
     k->set_host_notifier(qbus->parent, n, false);
     g_slice_free(VirtIOSCSIVring, r);
     return NULL;
@@ -104,6 +101,9 @@ void virtio_scsi_vring_push_notify(VirtIOSCSIReq *req)
     }
 }
 
+static void virtio_scsi_start_ioeventfd(VirtIOSCSI *s);
+static void virtio_scsi_stop_ioeventfd(VirtIOSCSI *s);
+
 static void virtio_scsi_iothread_handle_ctrl(EventNotifier *notifier)
 {
     VirtIOSCSIVring *vring = container_of(notifier,
@@ -111,6 +111,7 @@ static void virtio_scsi_iothread_handle_ctrl(EventNotifier *notifier)
     VirtIOSCSI *s = VIRTIO_SCSI(vring->parent);
     VirtIOSCSIReq *req;
 
+    assert(!s->pause_counter);
     event_notifier_test_and_clear(notifier);
     while ((req = virtio_scsi_pop_req_vring(s, vring))) {
         virtio_scsi_handle_ctrl_req(s, req);
@@ -124,6 +125,7 @@ static void virtio_scsi_iothread_handle_event(EventNotifier *notifier)
     VirtIOSCSI *s = vring->parent;
     VirtIODevice *vdev = VIRTIO_DEVICE(s);
 
+    assert(!s->pause_counter);
     event_notifier_test_and_clear(notifier);
 
     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
@@ -143,6 +145,7 @@ static void virtio_scsi_iothread_handle_cmd(EventNotifier *notifier)
     VirtIOSCSIReq *req, *next;
     QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
 
+    assert(!s->pause_counter);
     event_notifier_test_and_clear(notifier);
     while ((req = virtio_scsi_pop_req_vring(s, vring))) {
         if (virtio_scsi_handle_cmd_req_prepare(s, req)) {
@@ -155,8 +158,52 @@ static void virtio_scsi_iothread_handle_cmd(EventNotifier *notifier)
     }
 }
 
+void virtio_scsi_dataplane_pause_handler(Notifier *notifier, void *data)
+{
+    VirtIOSCSI *s = container_of(notifier, VirtIOSCSI, pause_notifier);
+    BdrvLockEvent *event = data;
+
+    if (event->locking) {
+        s->pause_counter++;
+        if (s->pause_counter == 1) {
+            virtio_scsi_stop_ioeventfd(s);
+        }
+    } else {
+        s->pause_counter--;
+        if (s->pause_counter == 0) {
+            virtio_scsi_start_ioeventfd(s);
+        }
+    }
+    assert(s->pause_counter >= 0);
+}
+
 /* assumes s->ctx held */
-static void virtio_scsi_clear_aio(VirtIOSCSI *s)
+static void virtio_scsi_start_ioeventfd(VirtIOSCSI *s)
+{
+    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+    int i;
+
+    if (!s->dataplane_started || s->dataplane_stopping) {
+        return;
+    }
+    if (s->ctrl_vring) {
+        aio_set_event_notifier(s->ctx, &s->ctrl_vring->host_notifier,
+                               virtio_scsi_iothread_handle_ctrl);
+    }
+    if (s->event_vring) {
+        aio_set_event_notifier(s->ctx, &s->event_vring->host_notifier,
+                               virtio_scsi_iothread_handle_event);
+    }
+    if (s->cmd_vrings) {
+        for (i = 0; i < vs->conf.num_queues && s->cmd_vrings[i]; i++) {
+            aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier,
+                                   virtio_scsi_iothread_handle_cmd);
+        }
+    }
+}
+
+/* assumes s->ctx held */
+static void virtio_scsi_stop_ioeventfd(VirtIOSCSI *s)
 {
     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
     int i;
@@ -169,7 +216,8 @@ static void virtio_scsi_clear_aio(VirtIOSCSI *s)
     }
     if (s->cmd_vrings) {
         for (i = 0; i < vs->conf.num_queues && s->cmd_vrings[i]; i++) {
-            aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier, NULL);
+            aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier,
+                                   NULL);
         }
     }
 }
@@ -229,24 +277,18 @@ void virtio_scsi_dataplane_start(VirtIOSCSI *s)
     }
 
     aio_context_acquire(s->ctx);
-    s->ctrl_vring = virtio_scsi_vring_init(s, vs->ctrl_vq,
-                                           virtio_scsi_iothread_handle_ctrl,
-                                           0);
+    s->ctrl_vring = virtio_scsi_vring_init(s, vs->ctrl_vq, 0);
     if (!s->ctrl_vring) {
         goto fail_vrings;
     }
-    s->event_vring = virtio_scsi_vring_init(s, vs->event_vq,
-                                            virtio_scsi_iothread_handle_event,
-                                            1);
+    s->event_vring = virtio_scsi_vring_init(s, vs->event_vq, 1);
     if (!s->event_vring) {
         goto fail_vrings;
     }
     s->cmd_vrings = g_new(VirtIOSCSIVring *, vs->conf.num_queues);
     for (i = 0; i < vs->conf.num_queues; i++) {
         s->cmd_vrings[i] =
-            virtio_scsi_vring_init(s, vs->cmd_vqs[i],
-                                   virtio_scsi_iothread_handle_cmd,
-                                   i + 2);
+            virtio_scsi_vring_init(s, vs->cmd_vqs[i], i + 2);
         if (!s->cmd_vrings[i]) {
             goto fail_vrings;
         }
@@ -254,11 +296,11 @@ void virtio_scsi_dataplane_start(VirtIOSCSI *s)
 
     s->dataplane_starting = false;
     s->dataplane_started = true;
+    virtio_scsi_start_ioeventfd(s);
     aio_context_release(s->ctx);
     return;
 
 fail_vrings:
-    virtio_scsi_clear_aio(s);
     aio_context_release(s->ctx);
     virtio_scsi_vring_teardown(s);
     for (i = 0; i < vs->conf.num_queues + 2; i++) {
@@ -290,11 +332,7 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
 
     aio_context_acquire(s->ctx);
 
-    aio_set_event_notifier(s->ctx, &s->ctrl_vring->host_notifier, NULL);
-    aio_set_event_notifier(s->ctx, &s->event_vring->host_notifier, NULL);
-    for (i = 0; i < vs->conf.num_queues; i++) {
-        aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier, NULL);
-    }
+    virtio_scsi_stop_ioeventfd(s);
 
     blk_drain_all(); /* ensure there are no in-flight requests */
 
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index e242fef..be2542e 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -774,6 +774,8 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
         blk_op_block_all(sd->conf.blk, s->blocker);
         aio_context_acquire(s->ctx);
         blk_set_aio_context(sd->conf.blk, s->ctx);
+        s->pause_notifier.notify = virtio_scsi_dataplane_pause_handler;
+        blk_add_lock_unlock_notifier(sd->conf.blk, &s->pause_notifier);
         aio_context_release(s->ctx);
     }
 
@@ -798,6 +800,7 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
     }
 
     if (s->ctx) {
+        notifier_remove(&s->pause_notifier);
         blk_op_unblock_all(sd->conf.blk, s->blocker);
     }
     qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index b42e7f1..928b87e 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -97,6 +97,8 @@ typedef struct VirtIOSCSI {
     bool dataplane_disabled;
     bool dataplane_fenced;
     Error *blocker;
+    Notifier pause_notifier;
+    int pause_counter;
     Notifier migration_state_notifier;
     uint32_t host_features;
 } VirtIOSCSI;
@@ -170,6 +172,7 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
                             uint32_t event, uint32_t reason);
 
 void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread);
+void virtio_scsi_dataplane_pause_handler(Notifier *notifier, void *data);
 void virtio_scsi_dataplane_start(VirtIOSCSI *s);
 void virtio_scsi_dataplane_stop(VirtIOSCSI *s);
 void virtio_scsi_vring_push_notify(VirtIOSCSIReq *req);
-- 
2.4.2


Thread overview: 18+ messages
2015-05-29 10:53 [Qemu-devel] [RFC PATCH 00/12] block: Protect block jobs with lock / unlock API Fam Zheng
2015-05-29 10:53 ` [Qemu-devel] [RFC PATCH 01/12] block: Use bdrv_drain to replace uncessary bdrv_drain_all Fam Zheng
2015-05-29 11:42   ` Paolo Bonzini
2015-06-25 12:19   ` [Qemu-devel] [PATCH for-2.4 " Paolo Bonzini
2015-05-29 10:53 ` [Qemu-devel] [RFC PATCH 02/12] block: Introduce bdrv_lock and bdrv_unlock API Fam Zheng
2015-05-29 16:57   ` Eric Blake
2015-05-29 10:53 ` [Qemu-devel] [RFC PATCH 03/12] blockdev: Lock BDS during internal snapshot transaction Fam Zheng
2015-05-29 11:39   ` Paolo Bonzini
2015-05-29 10:53 ` [Qemu-devel] [RFC PATCH 04/12] blockdev: Lock BDS during external " Fam Zheng
2015-05-29 10:53 ` [Qemu-devel] [RFC PATCH 05/12] blockdev: Lock BDS during drive-backup transaction Fam Zheng
2015-05-29 10:53 ` [Qemu-devel] [RFC PATCH 06/12] blockdev: Lock BDS during blockdev-backup transaction Fam Zheng
2015-05-29 10:53 ` [Qemu-devel] [RFC PATCH 07/12] block-backend: Add blk_add_lock_unlock_notifier Fam Zheng
2015-05-29 10:53 ` [Qemu-devel] [RFC PATCH 08/12] virtio-blk: Move complete_request to 'ops' structure Fam Zheng
2015-05-29 10:53 ` [Qemu-devel] [RFC PATCH 09/12] virtio-blk: Don't handle output when backend is locked Fam Zheng
2015-05-29 10:53 ` Fam Zheng [this message]
2015-05-29 10:53 ` [Qemu-devel] [RFC PATCH 11/12] nbd-server: Clear "can_read" " Fam Zheng
2015-05-29 10:53 ` [Qemu-devel] [RFC PATCH 12/12] mirror: Protect source between bdrv_drain and bdrv_swap Fam Zheng
2015-05-29 12:01 ` [Qemu-devel] [RFC PATCH 00/12] block: Protect block jobs with lock / unlock API Paolo Bonzini
