From: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
To: qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
Marcelo Tosatti <mtosatti@redhat.com>,
Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
Subject: [Qemu-devel] [PATCH v6 03/16] block: make copy-on-read a per-request flag
Date: Wed, 18 Jan 2012 14:40:42 +0000
Message-ID: <1326897655-2799-4-git-send-email-stefanha@linux.vnet.ibm.com>
In-Reply-To: <1326897655-2799-1-git-send-email-stefanha@linux.vnet.ibm.com>

Previously copy-on-read could only be enabled for all requests to a
block device, so when it was enabled both guest requests and QEMU's
internal requests performed copy-on-read.

For image streaming we want finer-grained behavior than simply
populating the image file from its backing image. Image streaming
supports partial streaming, where a common backing image is preserved.
In that case guest requests must not perform copy-on-read, because they
would indiscriminately copy data that should remain in an image further
down the backing chain.

Introduce a per-request copy-on-read flag so that a block device can
process both regular and copy-on-read requests. Overlapping reads and
writes still need to be serialized for correctness while copy-on-read is
happening, so add an in-flight reference count to track this.

Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
---
block.c | 49 ++++++++++++++++++++++++++++++++++++++-----------
block.h | 2 ++
block_int.h | 3 +++
trace-events | 3 ++-
4 files changed, 45 insertions(+), 12 deletions(-)
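
[Not part of the patch, just an illustration of the control flow described
in the commit message: a minimal, self-contained sketch of how a
per-request flag plus an in-flight counter interact. Device, do_readv,
read_sectors and the stubbed wait_for_overlapping_requests below are
hypothetical stand-ins for illustration, not QEMU APIs.]

/*
 * Sketch only: the device-wide copy_on_read setting simply ORs in the
 * per-request flag, and the flag (not the setting) drives an in-flight
 * counter that tells overlapping requests when they must be serialized.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum {
    REQ_COPY_ON_READ = 0x1,   /* per-request flag, like BDRV_REQ_COPY_ON_READ */
} RequestFlags;

typedef struct {
    bool copy_on_read;                   /* device-wide setting */
    unsigned int copy_on_read_in_flight; /* COR requests currently running */
} Device;

/* Hypothetical stand-ins for request serialization and the actual I/O. */
static void wait_for_overlapping_requests(Device *d) { (void)d; }
static int read_sectors(Device *d, long sector, int n)
{
    (void)d; (void)sector; (void)n;
    return 0;
}

static int do_readv(Device *d, long sector, int n, RequestFlags flags)
{
    int ret;

    /* The device-wide setting just turns into the per-request flag... */
    if (d->copy_on_read) {
        flags |= REQ_COPY_ON_READ;
    }

    /* ...and the flag drives the in-flight accounting. */
    if (flags & REQ_COPY_ON_READ) {
        d->copy_on_read_in_flight++;
    }

    /* Serialize only while some copy-on-read request is in flight. */
    if (d->copy_on_read_in_flight) {
        wait_for_overlapping_requests(d);
    }

    ret = read_sectors(d, sector, n);

    if (flags & REQ_COPY_ON_READ) {
        d->copy_on_read_in_flight--;
    }
    return ret;
}

int main(void)
{
    Device d = { .copy_on_read = false, .copy_on_read_in_flight = 0 };

    do_readv(&d, 0, 8, 0);                /* guest read: no copy-on-read */
    do_readv(&d, 0, 8, REQ_COPY_ON_READ); /* streaming read: copy-on-read */
    printf("in flight after both reads: %u\n", d.copy_on_read_in_flight);
    return 0;
}

[Keying serialization off the in-flight counter rather than the device-wide
setting is what lets image streaming issue copy-on-read reads on a device
whose guest requests remain plain reads, while writes (see the
bdrv_co_do_writev hunk below) only pay the serialization cost while such a
read is actually pending.]
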
diff --git a/block.c b/block.c
index 43f6484..edfab49 100644
--- a/block.c
+++ b/block.c
@@ -48,6 +48,10 @@
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
+typedef enum {
+ BDRV_REQ_COPY_ON_READ = 0x1,
+} BdrvRequestFlags;
+
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
@@ -62,7 +66,8 @@ static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
int64_t sector_num, int nb_sectors,
QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
@@ -1292,7 +1297,7 @@ static void coroutine_fn bdrv_rw_co_entry(void *opaque)
if (!rwco->is_write) {
rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
- rwco->nb_sectors, rwco->qiov);
+ rwco->nb_sectors, rwco->qiov, 0);
} else {
rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
rwco->nb_sectors, rwco->qiov);
@@ -1500,7 +1505,7 @@ int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
return 0;
}
-static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
+static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
/* Perform I/O through a temporary buffer so that users who scribble over
@@ -1523,8 +1528,8 @@ static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
round_to_clusters(bs, sector_num, nb_sectors,
&cluster_sector_num, &cluster_nb_sectors);
- trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors,
- cluster_sector_num, cluster_nb_sectors);
+ trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
+ cluster_sector_num, cluster_nb_sectors);
iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
@@ -1559,7 +1564,8 @@ err:
* Handle a read request in coroutine context
*/
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+ BdrvRequestFlags flags)
{
BlockDriver *drv = bs->drv;
BdrvTrackedRequest req;
@@ -1578,12 +1584,19 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
}
if (bs->copy_on_read) {
+ flags |= BDRV_REQ_COPY_ON_READ;
+ }
+ if (flags & BDRV_REQ_COPY_ON_READ) {
+ bs->copy_on_read_in_flight++;
+ }
+
+ if (bs->copy_on_read_in_flight) {
wait_for_overlapping_requests(bs, sector_num, nb_sectors);
}
tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
- if (bs->copy_on_read) {
+ if (flags & BDRV_REQ_COPY_ON_READ) {
int pnum;
ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
@@ -1592,7 +1605,7 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
}
if (!ret || pnum != nb_sectors) {
- ret = bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, qiov);
+ ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
goto out;
}
}
@@ -1601,6 +1614,11 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
out:
tracked_request_end(&req);
+
+ if (flags & BDRV_REQ_COPY_ON_READ) {
+ bs->copy_on_read_in_flight--;
+ }
+
return ret;
}
@@ -1609,7 +1627,16 @@ int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
{
trace_bdrv_co_readv(bs, sector_num, nb_sectors);
- return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov);
+ return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
+}
+
+int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
+{
+ trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
+
+ return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
+ BDRV_REQ_COPY_ON_READ);
}
/*
@@ -1637,7 +1664,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
bdrv_io_limits_intercept(bs, true, nb_sectors);
}
- if (bs->copy_on_read) {
+ if (bs->copy_on_read_in_flight) {
wait_for_overlapping_requests(bs, sector_num, nb_sectors);
}
@@ -3144,7 +3171,7 @@ static void coroutine_fn bdrv_co_do_rw(void *opaque)
if (!acb->is_write) {
acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
- acb->req.nb_sectors, acb->req.qiov);
+ acb->req.nb_sectors, acb->req.qiov, 0);
} else {
acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
acb->req.nb_sectors, acb->req.qiov);
diff --git a/block.h b/block.h
index 3bd4398..a3b0b80 100644
--- a/block.h
+++ b/block.h
@@ -142,6 +142,8 @@ int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
const void *buf, int count);
int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov);
+int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov);
int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
diff --git a/block_int.h b/block_int.h
index 311bd2a..07d67ed 100644
--- a/block_int.h
+++ b/block_int.h
@@ -218,6 +218,9 @@ struct BlockDriverState {
BlockDriverState *backing_hd;
BlockDriverState *file;
+ /* number of in-flight copy-on-read requests */
+ unsigned int copy_on_read_in_flight;
+
/* async read/write emulation */
void *sync_aiocb;
diff --git a/trace-events b/trace-events
index c18435b..656127c 100644
--- a/trace-events
+++ b/trace-events
@@ -65,9 +65,10 @@ bdrv_aio_readv(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %
bdrv_aio_writev(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d"
bdrv_co_readv(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
+bdrv_co_copy_on_readv(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
bdrv_co_writev(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
bdrv_co_io_em(void *bs, int64_t sector_num, int nb_sectors, int is_write, void *acb) "bs %p sector_num %"PRId64" nb_sectors %d is_write %d acb %p"
-bdrv_co_copy_on_readv(void *bs, int64_t sector_num, int nb_sectors, int64_t cluster_sector_num, int cluster_nb_sectors) "bs %p sector_num %"PRId64" nb_sectors %d cluster_sector_num %"PRId64" cluster_nb_sectors %d"
+bdrv_co_do_copy_on_readv(void *bs, int64_t sector_num, int nb_sectors, int64_t cluster_sector_num, int cluster_nb_sectors) "bs %p sector_num %"PRId64" nb_sectors %d cluster_sector_num %"PRId64" cluster_nb_sectors %d"
# hw/virtio-blk.c
virtio_blk_req_complete(void *req, int status) "req %p status %d"
--
1.7.8.3