From: Fam Zheng <famz@redhat.com>
To: qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>, Jeff Cody <jcody@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
qemu-block@nongnu.org
Subject: [Qemu-devel] [PATCH v2 11/13] virtio-blk: Don't handle output when backend is locked
Date: Tue, 2 Jun 2015 11:22:00 +0800
Message-ID: <1433215322-23529-12-git-send-email-famz@redhat.com>
In-Reply-To: <1433215322-23529-1-git-send-email-famz@redhat.com>
virtio-blk now listens to locking and unlocking of the associated block
backend.
Upon locking:

  non-dataplane:
   1) Set VirtIOBlock.paused.
   2) In virtio_blk_handle_output, do nothing if VirtIOBlock.paused is set.

  dataplane:
   1) Clear the host event notifier.
   2) In handle_notify, do nothing if VirtIOBlock.paused is set.

Upon unlocking:

  non-dataplane:
   1) Clear VirtIOBlock.paused.
   2) Schedule a BH on the AioContext of the backend, which calls
      virtio_blk_handle_output, so that previously unhandled kicks can
      make progress.

  dataplane:
   1) Set the host event notifier.
   2) Notify the host event notifier so that unhandled events are
      processed.

(A standalone sketch of how the lock/unlock notifier selects between
these two paths follows below.)
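For readers following the series, here is a minimal, compilable sketch of
the dispatch that the pause notifier performs. It is not QEMU code:
VirtIOBlockModel, OpsModel, LockEvent, pause_cb and resume_cb are invented,
simplified stand-ins for VirtIOBlock, VirtIOBlockOps and the BdrvLockEvent
introduced earlier in this series, and the callbacks only model the
non-dataplane path described above.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the QEMU types touched by this patch. */
    typedef struct {
        bool locking;                 /* models BdrvLockEvent.locking */
    } LockEvent;

    typedef struct VirtIOBlockModel VirtIOBlockModel;

    typedef struct {
        void (*pause)(VirtIOBlockModel *vblk);
        void (*resume)(VirtIOBlockModel *vblk);
    } OpsModel;                       /* models the new pause/resume hooks */

    struct VirtIOBlockModel {
        bool paused;
        const OpsModel *ops;
    };

    /* Models the dispatch in virtio_blk_pause_handler(): ignore redundant
     * events, otherwise call the backend-specific pause or resume hook. */
    static void lock_unlock_notify(VirtIOBlockModel *s, const LockEvent *event)
    {
        if (event->locking == s->paused) {
            return;                   /* already in the requested state */
        }
        if (event->locking) {
            s->ops->pause(s);
        } else {
            s->ops->resume(s);
        }
    }

    /* Non-dataplane stand-ins: pause sets the flag that makes
     * virtio_blk_handle_output() return early; resume clears it (the real
     * code additionally schedules a BH to replay pending kicks). */
    static void pause_cb(VirtIOBlockModel *s)  { s->paused = true;  printf("paused\n"); }
    static void resume_cb(VirtIOBlockModel *s) { s->paused = false; printf("resumed\n"); }

    int main(void)
    {
        static const OpsModel ops = { .pause = pause_cb, .resume = resume_cb };
        VirtIOBlockModel s = { .paused = false, .ops = &ops };

        lock_unlock_notify(&s, &(LockEvent){ .locking = true });   /* backend locked */
        lock_unlock_notify(&s, &(LockEvent){ .locking = true });   /* redundant, ignored */
        lock_unlock_notify(&s, &(LockEvent){ .locking = false });  /* backend unlocked */
        return 0;
    }

Built with any C99 compiler, the sketch prints "paused" then "resumed",
illustrating that redundant lock events are ignored and that the actual
pause/resume work is delegated to the per-backend ops.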
Signed-off-by: Fam Zheng <famz@redhat.com>
---
hw/block/dataplane/virtio-blk.c | 25 +++++++++++++++++-
hw/block/virtio-blk.c | 57 +++++++++++++++++++++++++++++++++++++++--
include/hw/virtio/virtio-blk.h | 8 +++++-
3 files changed, 86 insertions(+), 4 deletions(-)
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index ec0c8f4..d6c943c 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -87,8 +87,28 @@ static void complete_request_vring(VirtIOBlockReq *req, unsigned char status)
qemu_bh_schedule(s->bh);
}
+static void virtio_blk_data_plane_pause(VirtIOBlock *vblk)
+{
+ VirtIOBlockDataPlane *s = vblk->dataplane;
+
+ event_notifier_test_and_clear(&s->host_notifier);
+ aio_set_event_notifier(s->ctx, &s->host_notifier, NULL);
+}
+
+static void handle_notify(EventNotifier *e);
+static void virtio_blk_data_plane_resume(VirtIOBlock *vblk)
+{
+ VirtIOBlockDataPlane *s = vblk->dataplane;
+
+ aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify);
+
+ event_notifier_set(&s->host_notifier);
+}
+
static const VirtIOBlockOps virtio_blk_data_plane_ops = {
- .complete_request = complete_request_vring,
+ .complete_request = complete_request_vring,
+ .pause = virtio_blk_data_plane_pause,
+ .resume = virtio_blk_data_plane_resume,
};
static void handle_notify(EventNotifier *e)
@@ -98,6 +118,9 @@ static void handle_notify(EventNotifier *e)
VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
event_notifier_test_and_clear(&s->host_notifier);
+ if (vblk->paused) {
+ return;
+ }
blk_io_plug(s->conf->conf.blk);
for (;;) {
MultiReqBuffer mrb = {};
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index af20e78..d485a40 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -59,8 +59,38 @@ static void virtio_blk_complete_request(VirtIOBlockReq *req,
virtio_notify(vdev, s->vq);
}
+typedef struct {
+ QEMUBH *bh;
+ VirtIOBlock *s;
+} VirtIOBlockResumeData;
+
+static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq);
+static void virtio_blk_resume_bh_cb(void *opaque)
+{
+ VirtIOBlockResumeData *data = opaque;
+ qemu_bh_delete(data->bh);
+ virtio_blk_handle_output(VIRTIO_DEVICE(data->s), data->s->vq);
+}
+
+static void virtio_blk_pause(VirtIOBlock *vblk)
+{
+ /* TODO: stop ioeventfd */
+}
+
+static void virtio_blk_resume(VirtIOBlock *vblk)
+{
+ VirtIOBlockResumeData *data = g_new(VirtIOBlockResumeData, 1);
+ data->bh = aio_bh_new(blk_get_aio_context(vblk->blk),
+ virtio_blk_resume_bh_cb, data);
+ data->s = vblk;
+ data->s->paused = false;
+ qemu_bh_schedule(data->bh);
+}
+
static const VirtIOBlockOps virtio_blk_ops = (VirtIOBlockOps) {
- .complete_request = virtio_blk_complete_request,
+ .complete_request = virtio_blk_complete_request,
+ .pause = virtio_blk_pause,
+ .resume = virtio_blk_resume,
};
static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
@@ -597,6 +627,9 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
VirtIOBlockReq *req;
MultiReqBuffer mrb = {};
+ if (s->paused) {
+ return;
+ }
/* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
* dataplane here instead of waiting for .set_status().
*/
@@ -787,7 +820,7 @@ static void virtio_blk_save(QEMUFile *f, void *opaque)
virtio_save(vdev, f);
}
-
+
static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
@@ -875,6 +908,22 @@ static void virtio_blk_migration_state_changed(Notifier *notifier, void *data)
}
}
+static void virtio_blk_pause_handler(Notifier *notifier, void *opaque)
+{
+ BdrvLockEvent *event = opaque;
+ VirtIOBlock *s = container_of(notifier, VirtIOBlock,
+ pause_notifier);
+
+ if (event->locking == s->paused) {
+ return;
+ }
+ if (event->locking) {
+ s->ops->pause(s);
+ } else {
+ s->ops->resume(s);
+ }
+}
+
static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -926,6 +975,9 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
blk_set_guest_block_size(s->blk, s->conf.conf.logical_block_size);
blk_iostatus_enable(s->blk);
+
+ s->pause_notifier.notify = virtio_blk_pause_handler;
+ blk_add_lock_unlock_notifier(s->blk, &s->pause_notifier);
}
static void virtio_blk_device_unrealize(DeviceState *dev, Error **errp)
@@ -933,6 +985,7 @@ static void virtio_blk_device_unrealize(DeviceState *dev, Error **errp)
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOBlock *s = VIRTIO_BLK(dev);
+ notifier_remove(&s->pause_notifier);
remove_migration_state_change_notifier(&s->migration_state_notifier);
virtio_blk_data_plane_destroy(s->dataplane);
s->dataplane = NULL;
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 28b3436..4b6246f 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -42,12 +42,16 @@ struct VirtIOBlkConf
};
struct VirtIOBlockDataPlane;
-
+struct VirtIOBlock;
struct VirtIOBlockReq;
typedef struct {
/* Function to push to vq and notify guest */
void (*complete_request)(struct VirtIOBlockReq *req, unsigned char status);
+
+ /* Functions to pause/resume request handling */
+ void (*pause)(struct VirtIOBlock *vblk);
+ void (*resume)(struct VirtIOBlock *vblk);
} VirtIOBlockOps;
typedef struct VirtIOBlock {
@@ -62,6 +66,8 @@ typedef struct VirtIOBlock {
VMChangeStateEntry *change;
const VirtIOBlockOps *ops;
Notifier migration_state_notifier;
+ Notifier pause_notifier;
+ bool paused;
struct VirtIOBlockDataPlane *dataplane;
} VirtIOBlock;
--
2.4.1