From: Fam Zheng <famz@redhat.com>
To: qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
qemu-block@nongnu.org, Juan Quintela <quintela@redhat.com>,
Jeff Cody <jcody@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
mreitz@redhat.com, Stefan Hajnoczi <stefanha@redhat.com>,
Amit Shah <amit.shah@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH v4 06/13] virtio-scsi-dataplane: Add "device IO" op blocker listener
Date: Tue, 19 May 2015 11:49:39 +0000 [thread overview]
Message-ID: <1432036186-29903-7-git-send-email-famz@redhat.com> (raw)
In-Reply-To: <1432036186-29903-1-git-send-email-famz@redhat.com>
When a disk is attached to scsi-bus, virtio_scsi_hotplug will take care
of protecting the block device with op blockers. Currently we haven't
enabled block jobs (like what's done in virtio_blk_data_plane_create),
but it is necessary to honor the "device IO" op blocker before we do.
This is useful to make sure that guest IO requests are paused during qmp
transactions (such as multi-disk snapshot or backup).
A counter is added to the virtio-scsi device, which keeps track of
currently blocked disks. If it goes from 0 to 1, the ioeventfds are
disabled; when it goes back to 0, they are re-enabled.
Also, in device initialization, move the enabling of ioeventfds to just
before returning, so that virtio_scsi_clear_aio is no longer needed there.
Rename it, pair it with an enabling variant, fix one coding style issue,
and then use both at the device pause points.
Signed-off-by: Fam Zheng <famz@redhat.com>
---
hw/scsi/virtio-scsi-dataplane.c | 82 +++++++++++++++++++++++++++++++----------
hw/scsi/virtio-scsi.c | 3 ++
include/hw/virtio/virtio-scsi.h | 3 ++
3 files changed, 68 insertions(+), 20 deletions(-)
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 5575648..e220c12 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -40,7 +40,6 @@ void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s,
VirtQueue *vq,
- EventNotifierHandler *handler,
int n)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
@@ -60,7 +59,6 @@ static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s,
r = g_slice_new(VirtIOSCSIVring);
r->host_notifier = *virtio_queue_get_host_notifier(vq);
r->guest_notifier = *virtio_queue_get_guest_notifier(vq);
- aio_set_event_notifier(s->ctx, &r->host_notifier, handler);
r->parent = s;
@@ -71,7 +69,6 @@ static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s,
return r;
fail_vring:
- aio_set_event_notifier(s->ctx, &r->host_notifier, NULL);
k->set_host_notifier(qbus->parent, n, false);
g_slice_free(VirtIOSCSIVring, r);
return NULL;
@@ -104,6 +101,9 @@ void virtio_scsi_vring_push_notify(VirtIOSCSIReq *req)
}
}
+static void virtio_scsi_start_ioeventfd(VirtIOSCSI *s);
+static void virtio_scsi_stop_ioeventfd(VirtIOSCSI *s);
+
static void virtio_scsi_iothread_handle_ctrl(EventNotifier *notifier)
{
VirtIOSCSIVring *vring = container_of(notifier,
@@ -111,6 +111,7 @@ static void virtio_scsi_iothread_handle_ctrl(EventNotifier *notifier)
VirtIOSCSI *s = VIRTIO_SCSI(vring->parent);
VirtIOSCSIReq *req;
+ assert(!s->pause_counter);
event_notifier_test_and_clear(notifier);
while ((req = virtio_scsi_pop_req_vring(s, vring))) {
virtio_scsi_handle_ctrl_req(s, req);
@@ -124,6 +125,7 @@ static void virtio_scsi_iothread_handle_event(EventNotifier *notifier)
VirtIOSCSI *s = vring->parent;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
+ assert(!s->pause_counter);
event_notifier_test_and_clear(notifier);
if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
@@ -143,6 +145,7 @@ static void virtio_scsi_iothread_handle_cmd(EventNotifier *notifier)
VirtIOSCSIReq *req, *next;
QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
+ assert(!s->pause_counter);
event_notifier_test_and_clear(notifier);
while ((req = virtio_scsi_pop_req_vring(s, vring))) {
if (virtio_scsi_handle_cmd_req_prepare(s, req)) {
@@ -155,8 +158,56 @@ static void virtio_scsi_iothread_handle_cmd(EventNotifier *notifier)
}
}
+void virtio_scsi_dataplane_blocker_notify(Notifier *notifier,
+ void *data)
+{
+ VirtIOSCSI *s = container_of(notifier, VirtIOSCSI, blocker_notifier);
+ BlockOpEvent *event = data;
+
+ if (event->type != BLOCK_OP_TYPE_DEVICE_IO) {
+ return;
+ }
+ if (event->blocking) {
+ s->pause_counter++;
+ if (s->pause_counter == 1) {
+ virtio_scsi_stop_ioeventfd(s);
+ }
+ } else {
+ s->pause_counter--;
+ if (s->pause_counter == 0) {
+ virtio_scsi_start_ioeventfd(s);
+ }
+ }
+ assert(s->pause_counter >= 0);
+}
+
/* assumes s->ctx held */
-static void virtio_scsi_clear_aio(VirtIOSCSI *s)
+static void virtio_scsi_start_ioeventfd(VirtIOSCSI *s)
+{
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ int i;
+
+ if (!s->dataplane_started || s->dataplane_stopping) {
+ return;
+ }
+ if (s->ctrl_vring) {
+ aio_set_event_notifier(s->ctx, &s->ctrl_vring->host_notifier,
+ virtio_scsi_iothread_handle_ctrl);
+ }
+ if (s->event_vring) {
+ aio_set_event_notifier(s->ctx, &s->event_vring->host_notifier,
+ virtio_scsi_iothread_handle_event);
+ }
+ if (s->cmd_vrings) {
+ for (i = 0; i < vs->conf.num_queues && s->cmd_vrings[i]; i++) {
+ aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier,
+ virtio_scsi_iothread_handle_cmd);
+ }
+ }
+}
+
+/* assumes s->ctx held */
+static void virtio_scsi_stop_ioeventfd(VirtIOSCSI *s)
{
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
int i;
@@ -169,7 +220,8 @@ static void virtio_scsi_clear_aio(VirtIOSCSI *s)
}
if (s->cmd_vrings) {
for (i = 0; i < vs->conf.num_queues && s->cmd_vrings[i]; i++) {
- aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier, NULL);
+ aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier,
+ NULL);
}
}
}
@@ -229,24 +281,18 @@ void virtio_scsi_dataplane_start(VirtIOSCSI *s)
}
aio_context_acquire(s->ctx);
- s->ctrl_vring = virtio_scsi_vring_init(s, vs->ctrl_vq,
- virtio_scsi_iothread_handle_ctrl,
- 0);
+ s->ctrl_vring = virtio_scsi_vring_init(s, vs->ctrl_vq, 0);
if (!s->ctrl_vring) {
goto fail_vrings;
}
- s->event_vring = virtio_scsi_vring_init(s, vs->event_vq,
- virtio_scsi_iothread_handle_event,
- 1);
+ s->event_vring = virtio_scsi_vring_init(s, vs->event_vq, 1);
if (!s->event_vring) {
goto fail_vrings;
}
s->cmd_vrings = g_new(VirtIOSCSIVring *, vs->conf.num_queues);
for (i = 0; i < vs->conf.num_queues; i++) {
s->cmd_vrings[i] =
- virtio_scsi_vring_init(s, vs->cmd_vqs[i],
- virtio_scsi_iothread_handle_cmd,
- i + 2);
+ virtio_scsi_vring_init(s, vs->cmd_vqs[i], i + 2);
if (!s->cmd_vrings[i]) {
goto fail_vrings;
}
@@ -254,11 +300,11 @@ void virtio_scsi_dataplane_start(VirtIOSCSI *s)
s->dataplane_starting = false;
s->dataplane_started = true;
+ virtio_scsi_start_ioeventfd(s);
aio_context_release(s->ctx);
return;
fail_vrings:
- virtio_scsi_clear_aio(s);
aio_context_release(s->ctx);
virtio_scsi_vring_teardown(s);
for (i = 0; i < vs->conf.num_queues + 2; i++) {
@@ -290,11 +336,7 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
aio_context_acquire(s->ctx);
- aio_set_event_notifier(s->ctx, &s->ctrl_vring->host_notifier, NULL);
- aio_set_event_notifier(s->ctx, &s->event_vring->host_notifier, NULL);
- for (i = 0; i < vs->conf.num_queues; i++) {
- aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier, NULL);
- }
+ virtio_scsi_stop_ioeventfd(s);
blk_drain_all(); /* ensure there are no in-flight requests */
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 5e15fa6..16c253f 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -775,6 +775,8 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
blk_op_unblock(sd->conf.blk, BLOCK_OP_TYPE_DEVICE_IO, s->blocker);
aio_context_acquire(s->ctx);
blk_set_aio_context(sd->conf.blk, s->ctx);
+ s->blocker_notifier.notify = virtio_scsi_dataplane_blocker_notify;
+ blk_op_blocker_add_notifier(sd->conf.blk, &s->blocker_notifier);
aio_context_release(s->ctx);
}
@@ -799,6 +801,7 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
}
if (s->ctx) {
+ notifier_remove(&s->blocker_notifier);
blk_op_unblock_all(sd->conf.blk, s->blocker);
}
qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index b42e7f1..f8591fd 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -97,6 +97,8 @@ typedef struct VirtIOSCSI {
bool dataplane_disabled;
bool dataplane_fenced;
Error *blocker;
+ Notifier blocker_notifier;
+ int pause_counter;
Notifier migration_state_notifier;
uint32_t host_features;
} VirtIOSCSI;
@@ -170,6 +172,7 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
uint32_t event, uint32_t reason);
void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread);
+void virtio_scsi_dataplane_blocker_notify(Notifier *notifier, void *data);
void virtio_scsi_dataplane_start(VirtIOSCSI *s);
void virtio_scsi_dataplane_stop(VirtIOSCSI *s);
void virtio_scsi_vring_push_notify(VirtIOSCSIReq *req);
--
2.4.1
next prev parent reply other threads:[~2015-05-19 3:50 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-05-19 11:49 [Qemu-devel] [PATCH v4 00/13] Fix transactional snapshot with dataplane and NBD export Fam Zheng
2015-05-19 11:49 ` [Qemu-devel] [PATCH v4 01/13] block: Add op blocker type "device IO" Fam Zheng
2015-05-19 11:49 ` [Qemu-devel] [PATCH v4 02/13] block: Add op blocker notifier list Fam Zheng
2015-05-19 11:49 ` [Qemu-devel] [PATCH v4 03/13] block-backend: Add blk_op_blocker_add_notifier Fam Zheng
2015-05-19 11:49 ` [Qemu-devel] [PATCH v4 04/13] virtio-blk: Move complete_request to 'ops' structure Fam Zheng
2015-05-19 11:49 ` [Qemu-devel] [PATCH v4 05/13] virtio-blk: Don't handle output when there is "device IO" op blocker Fam Zheng
2015-05-19 11:49 ` Fam Zheng [this message]
2015-05-19 11:49 ` [Qemu-devel] [PATCH v4 07/13] nbd-server: Clear "can_read" when "device io" blocker is set Fam Zheng
2015-05-19 11:49 ` [Qemu-devel] [PATCH v4 08/13] blockdev: Block device IO during internal snapshot transaction Fam Zheng
2015-05-19 11:49 ` [Qemu-devel] [PATCH v4 09/13] blockdev: Block device IO during external " Fam Zheng
2015-05-19 11:49 ` [Qemu-devel] [PATCH v4 10/13] blockdev: Block device IO during drive-backup transaction Fam Zheng
2015-05-19 11:49 ` [Qemu-devel] [PATCH v4 11/13] blockdev: Block device IO during blockdev-backup transaction Fam Zheng
2015-05-19 11:49 ` [Qemu-devel] [PATCH v4 12/13] block: Block "device IO" during bdrv_drain and bdrv_drain_all Fam Zheng
2015-05-19 11:49 ` [Qemu-devel] [PATCH v4 13/13] block/mirror: Block "device IO" during mirror exit Fam Zheng
2015-05-19 8:04 ` Paolo Bonzini
2015-05-19 16:48 ` Fam Zheng
2015-05-19 8:49 ` Paolo Bonzini
2015-05-19 18:37 ` Fam Zheng
2015-05-19 10:57 ` Paolo Bonzini
2015-05-20 2:23 ` Fam Zheng
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1432036186-29903-7-git-send-email-famz@redhat.com \
--to=famz@redhat.com \
--cc=amit.shah@redhat.com \
--cc=jcody@redhat.com \
--cc=kwolf@redhat.com \
--cc=mreitz@redhat.com \
--cc=mst@redhat.com \
--cc=pbonzini@redhat.com \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=stefanha@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).