From: Fam Zheng <famz@redhat.com>
To: qemu-devel@nongnu.org
Cc: kwolf@redhat.com, Fam Zheng <famz@redhat.com>, stefanha@redhat.com
Subject: [Qemu-devel] [PATCH v3 6/6] vmdk: add bdrv_co_write_zeroes
Date: Wed, 24 Apr 2013 20:44:35 +0800 [thread overview]
Message-ID: <1366807475-26350-7-git-send-email-famz@redhat.com> (raw)
In-Reply-To: <1366807475-26350-1-git-send-email-famz@redhat.com>
Use special offset to write zeroes efficiently, when zeroed-grain GTE is
available. If we zero-write an allocated cluster, the cluster is leaked because
its offset pointer is overwritten by "0x1".
Signed-off-by: Fam Zheng <famz@redhat.com>
---
block/vmdk.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 63 insertions(+), 14 deletions(-)
diff --git a/block/vmdk.c b/block/vmdk.c
index 16e1417..90cb071 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -905,6 +905,13 @@ static int get_cluster_offset(BlockDriverState *bs,
l2_index = ((offset >> 9) / extent->cluster_sectors) % extent->l2_size;
*cluster_offset = le32_to_cpu(l2_table[l2_index]);
+ if (m_data) {
+ m_data->valid = 1;
+ m_data->l1_index = l1_index;
+ m_data->l2_index = l2_index;
+ m_data->offset = *cluster_offset;
+ m_data->l2_offset = extent->l1_table[m_data->l1_index];
+ }
if (extent->has_zero_grain && *cluster_offset == VMDK_GTE_ZEROED) {
zeroed = true;
}
@@ -1165,8 +1172,17 @@ static coroutine_fn int vmdk_co_read(BlockDriverState *bs, int64_t sector_num,
return ret;
}
+/**
+ * vmdk_write:
+ * @zeroed: buf is ignored (data is zero), use zeroed_grain GTE feature
+ * if possible, otherwise return -ENOTSUP.
+ * @zero_dry_run: used for zeroed == true only, don't update L2 table, just check if a zero write is possible
+ *
+ * Returns: error code with 0 for success.
+ */
static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
- const uint8_t *buf, int nb_sectors)
+ const uint8_t *buf, int nb_sectors,
+ bool zeroed, bool zero_dry_run)
{
BDRVVmdkState *s = bs->opaque;
VmdkExtent *extent = NULL;
@@ -1212,7 +1228,7 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
&cluster_offset);
}
}
- if (ret) {
+ if (ret == VMDK_ERROR) {
return -EINVAL;
}
extent_begin_sector = extent->end_sector - extent->sectors;
@@ -1222,17 +1238,34 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
if (n > nb_sectors) {
n = nb_sectors;
}
-
- ret = vmdk_write_extent(extent,
- cluster_offset, index_in_cluster * 512,
- buf, n, sector_num);
- if (ret) {
- return ret;
- }
- if (m_data.valid) {
- /* update L2 tables */
- if (vmdk_L2update(extent, &m_data) == -1) {
- return -EIO;
+ if (zeroed) {
+ /* Do zeroed write, buf is ignored */
+ if (extent->has_zero_grain &&
+ index_in_cluster == 0 &&
+ n >= extent->cluster_sectors) {
+ n = extent->cluster_sectors;
+ if (!zero_dry_run) {
+ m_data.offset = cpu_to_le32(VMDK_GTE_ZEROED);
+ /* update L2 tables */
+ if (vmdk_L2update(extent, &m_data) != VMDK_OK) {
+ return -EIO;
+ }
+ }
+ } else {
+ return -ENOTSUP;
+ }
+ } else {
+ ret = vmdk_write_extent(extent,
+ cluster_offset, index_in_cluster * 512,
+ buf, n, sector_num);
+ if (ret) {
+ return ret;
+ }
+ if (m_data.valid) {
+ /* update L2 tables */
+ if (vmdk_L2update(extent, &m_data) != VMDK_OK) {
+ return -EIO;
+ }
}
}
nb_sectors -= n;
@@ -1258,7 +1291,22 @@ static coroutine_fn int vmdk_co_write(BlockDriverState *bs, int64_t sector_num,
int ret;
BDRVVmdkState *s = bs->opaque;
qemu_co_mutex_lock(&s->lock);
- ret = vmdk_write(bs, sector_num, buf, nb_sectors);
+ ret = vmdk_write(bs, sector_num, buf, nb_sectors, false, false);
+ qemu_co_mutex_unlock(&s->lock);
+ return ret;
+}
+
+static int coroutine_fn vmdk_co_write_zeroes(BlockDriverState *bs,
+ int64_t sector_num,
+ int nb_sectors)
+{
+ int ret;
+ BDRVVmdkState *s = bs->opaque;
+ qemu_co_mutex_lock(&s->lock);
+ ret = vmdk_write(bs, sector_num, NULL, nb_sectors, true, true);
+ if (!ret) {
+ ret = vmdk_write(bs, sector_num, NULL, nb_sectors, true, false);
+ }
qemu_co_mutex_unlock(&s->lock);
return ret;
}
@@ -1738,6 +1786,7 @@ static BlockDriver bdrv_vmdk = {
.bdrv_reopen_prepare = vmdk_reopen_prepare,
.bdrv_read = vmdk_co_read,
.bdrv_write = vmdk_co_write,
+ .bdrv_co_write_zeroes = vmdk_co_write_zeroes,
.bdrv_close = vmdk_close,
.bdrv_create = vmdk_create,
.bdrv_co_flush_to_disk = vmdk_co_flush,
--
1.8.1.4
next prev parent reply other threads:[~2013-04-24 12:45 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-04-24 12:44 [Qemu-devel] [PATCH v3 0/6] vmdk: zeroed-grain GTE support Fam Zheng
2013-04-24 12:44 ` [Qemu-devel] [PATCH v3 1/6] vmdk: named return code Fam Zheng
2013-04-24 12:44 ` [Qemu-devel] [PATCH v3 2/6] vmdk: add support for "zeroed-grain" GTE Fam Zheng
2013-04-24 12:44 ` [Qemu-devel] [PATCH v3 3/6] vmdk: Add option to create zeroed-grain image Fam Zheng
2013-04-24 12:44 ` [Qemu-devel] [PATCH v3 4/6] vmdk: change magic number to macro Fam Zheng
2013-04-24 12:44 ` [Qemu-devel] [PATCH v3 5/6] vmdk: store fields of VmdkMetaData in cpu endian Fam Zheng
2013-04-25 13:20 ` Stefan Hajnoczi
2013-04-24 12:44 ` Fam Zheng [this message]
2013-04-25 13:20 ` [Qemu-devel] [PATCH v3 6/6] vmdk: add bdrv_co_write_zeroes Stefan Hajnoczi
2013-04-25 13:25 ` [Qemu-devel] [PATCH v3 0/6] vmdk: zeroed-grain GTE support Stefan Hajnoczi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1366807475-26350-7-git-send-email-famz@redhat.com \
--to=famz@redhat.com \
--cc=kwolf@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=stefanha@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).