From: Fam Zheng <famz@redhat.com>
To: Ashijeet Acharya <ashijeetacharya@gmail.com>
Cc: kwolf@redhat.com, qemu-block@nongnu.org, stefanha@gmail.com,
qemu-devel@nongnu.org, mreitz@redhat.com, jsnow@redhat.com
Subject: Re: [Qemu-devel] [PATCH v4 7/8] vmdk: Update metadata for multiple clusters
Date: Thu, 1 Jun 2017 22:20:24 +0800
Message-ID: <20170601142024.GH13127@lemon.lan>
In-Reply-To: <1492838021-10538-8-git-send-email-ashijeetacharya@gmail.com>
On Sat, 04/22 10:43, Ashijeet Acharya wrote:
> Include a next pointer in the VmdkMetaData struct so that metadata
> entries can be chained, with each new entry pointing to the previously
> allocated one. Modify vmdk_L2update to update the metadata for an
> allocation of multiple clusters at once.
>
> Signed-off-by: Ashijeet Acharya <ashijeetacharya@gmail.com>
> ---
> block/vmdk.c | 129 ++++++++++++++++++++++++++++++++++++++++++++++-------------
> 1 file changed, 102 insertions(+), 27 deletions(-)
>
> diff --git a/block/vmdk.c b/block/vmdk.c
> index 8d34cd9..e52c373 100644
> --- a/block/vmdk.c
> +++ b/block/vmdk.c
> @@ -137,6 +137,8 @@ typedef struct VmdkMetaData {
> int valid;
> uint32_t *l2_cache_entry;
> uint32_t nb_clusters;
> + uint32_t offset;
> + struct VmdkMetaData *next;
> } VmdkMetaData;
>
> typedef struct VmdkGrainMarker {
> @@ -1116,34 +1118,89 @@ exit:
> return ret;
> }
>
> -static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
> - uint32_t offset)
> +static int vmdk_alloc_cluster_link_l2(VmdkExtent *extent,
> + VmdkMetaData *m_data, bool zeroed)
> {
> - offset = cpu_to_le32(offset);
> + int i;
> + uint32_t offset, temp_offset;
> + int *l2_table_array;
> + int l2_array_size;
> +
> + if (zeroed) {
> + temp_offset = VMDK_GTE_ZEROED;
> + } else {
> + temp_offset = m_data->offset;
> + }
> +
> + temp_offset = cpu_to_le32(temp_offset);
> +
> + l2_array_size = sizeof(uint32_t) * m_data->nb_clusters;
> + l2_table_array = qemu_try_blockalign(extent->file->bs,
> + QEMU_ALIGN_UP(l2_array_size,
> + BDRV_SECTOR_SIZE));
> + if (l2_table_array == NULL) {
> + return VMDK_ERROR;
> + }
> + memset(l2_table_array, 0, QEMU_ALIGN_UP(l2_array_size, BDRV_SECTOR_SIZE));
> /* update L2 table */
> + offset = temp_offset;
> + for (i = 0; i < m_data->nb_clusters; i++) {
> + l2_table_array[i] = offset;
> + if (!zeroed) {
> + offset += 128;
Wrong calculation: offset is little-endian here, since it was assigned from
temp_offset, which was converted away from CPU endianness a few lines above.
Adding 128 to an le32 value is only correct on little-endian hosts.
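One way to fix it (an untested sketch, not necessarily how you want to
structure it) is to keep the running sector number in CPU endianness and
convert each entry only at the point where it is stored:

    /* Sketch: do the grain arithmetic native-endian, convert on store.
     * 128 is the number of 512-byte sectors per 64 KiB cluster. */
    uint32_t cur = m_data->offset;
    for (i = 0; i < m_data->nb_clusters; i++) {
        l2_table_array[i] = zeroed ? cpu_to_le32(VMDK_GTE_ZEROED)
                                   : cpu_to_le32(cur);
        if (!zeroed) {
            cur += 128;
        }
    }

The l2_cache_entry loop below has the same problem and can use the same
pattern.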
> + }
> + }
> if (bdrv_pwrite_sync(extent->file,
> - ((int64_t)m_data->l2_offset * 512)
> - + (m_data->l2_index * sizeof(offset)),
> - &offset, sizeof(offset)) < 0) {
> + ((int64_t)m_data->l2_offset * 512)
> + + ((m_data->l2_index) * sizeof(offset)),
> + l2_table_array, l2_array_size) < 0) {
> return VMDK_ERROR;
> }
> /* update backup L2 table */
> if (extent->l1_backup_table_offset != 0) {
> m_data->l2_offset = extent->l1_backup_table[m_data->l1_index];
> if (bdrv_pwrite_sync(extent->file,
> - ((int64_t)m_data->l2_offset * 512)
> - + (m_data->l2_index * sizeof(offset)),
> - &offset, sizeof(offset)) < 0) {
> + ((int64_t)m_data->l2_offset * 512)
> + + ((m_data->l2_index) * sizeof(offset)),
> + l2_table_array, l2_array_size) < 0) {
> return VMDK_ERROR;
> }
> }
> +
> + offset = temp_offset;
> if (m_data->l2_cache_entry) {
> - *m_data->l2_cache_entry = offset;
> + for (i = 0; i < m_data->nb_clusters; i++) {
> + *m_data->l2_cache_entry = offset;
> + m_data->l2_cache_entry++;
> +
> + if (!zeroed) {
> + offset += 128;
Ditto.
> + }
> + }
> }
>
> + qemu_vfree(l2_table_array);
> return VMDK_OK;
> }
>
> +static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
> + bool zeroed)
> +{
> + int ret;
> +
> + while (m_data->next != NULL) {
> +
> + ret = vmdk_alloc_cluster_link_l2(extent, m_data, zeroed);
> + if (ret < 0) {
> + return ret;
> + }
> +
> + m_data = m_data->next;
> + }
> +
> + return VMDK_OK;
> +}
> +
> /*
> * vmdk_l2load
> *
> @@ -1263,7 +1320,7 @@ static int get_cluster_table(VmdkExtent *extent, uint64_t offset,
> */
> static int handle_alloc(BlockDriverState *bs, VmdkExtent *extent,
> uint64_t offset, uint64_t *cluster_offset,
> - int64_t *bytes, VmdkMetaData *m_data,
> + int64_t *bytes, VmdkMetaData **m_data,
> bool allocate, uint32_t *total_alloc_clusters)
> {
> int l1_index, l2_offset, l2_index;
> @@ -1272,6 +1329,7 @@ static int handle_alloc(BlockDriverState *bs, VmdkExtent *extent,
> uint32_t nb_clusters;
> bool zeroed = false;
> uint64_t skip_start_bytes, skip_end_bytes;
> + VmdkMetaData *old_m_data;
> int ret;
>
> ret = get_cluster_table(extent, offset, &l1_index, &l2_offset,
> @@ -1323,13 +1381,21 @@ static int handle_alloc(BlockDriverState *bs, VmdkExtent *extent,
> if (ret < 0) {
> return ret;
> }
> - if (m_data) {
> - m_data->valid = 1;
> - m_data->l1_index = l1_index;
> - m_data->l2_index = l2_index;
> - m_data->l2_offset = l2_offset;
> - m_data->l2_cache_entry = &l2_table[l2_index];
> - m_data->nb_clusters = nb_clusters;
> +
> + if (*m_data) {
> + old_m_data = *m_data;
> + *m_data = g_malloc0(sizeof(**m_data));
> +
> + **m_data = (VmdkMetaData) {
> + .valid = 1,
> + .l1_index = l1_index,
> + .l2_index = l2_index,
> + .l2_offset = l2_offset,
> + .l2_cache_entry = &l2_table[l2_index],
> + .nb_clusters = nb_clusters,
> + .offset = cluster_sector,
> + .next = old_m_data,
> + };
> }
> }
> *cluster_offset = cluster_sector << BDRV_SECTOR_BITS;
> @@ -1358,7 +1424,7 @@ static int handle_alloc(BlockDriverState *bs, VmdkExtent *extent,
> */
> static int vmdk_alloc_clusters(BlockDriverState *bs,
> VmdkExtent *extent,
> - VmdkMetaData *m_data, uint64_t offset,
> + VmdkMetaData **m_data, uint64_t offset,
> bool allocate, uint64_t *cluster_offset,
> int64_t bytes,
> uint32_t *total_alloc_clusters)
> @@ -1378,8 +1444,8 @@ static int vmdk_alloc_clusters(BlockDriverState *bs,
> new_cluster_offset = 0;
> *cluster_offset = 0;
> n_bytes = 0;
> - if (m_data) {
> - m_data->valid = 0;
> + if (*m_data) {
> + (*m_data)->valid = 0;
> }
>
> /* due to L2 table margins all bytes may not get allocated at once */
> @@ -1792,10 +1858,12 @@ static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
> int64_t offset_in_cluster, n_bytes;
> uint64_t cluster_offset;
> uint64_t bytes_done = 0;
> - VmdkMetaData m_data;
> uint64_t extent_end;
> + VmdkMetaData *m_data;
> uint32_t total_alloc_clusters = 0;
>
> + m_data = g_malloc0(sizeof(*m_data));
> +
> if (DIV_ROUND_UP(offset, BDRV_SECTOR_SIZE) > bs->total_sectors) {
> error_report("Wrong offset: offset=0x%" PRIx64
> " total_sectors=0x%" PRIx64,
> @@ -1804,6 +1872,7 @@ static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
> }
>
> while (bytes > 0) {
> + m_data->next = NULL;
> extent = find_extent(s, offset >> BDRV_SECTOR_BITS, extent);
> if (!extent) {
> return -EIO;
> @@ -1849,7 +1918,7 @@ static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
> total_alloc_clusters;
> if (!zero_dry_run) {
> /* update L2 tables */
> - if (vmdk_L2update(extent, &m_data, VMDK_GTE_ZEROED)
> + if (vmdk_L2update(extent, m_data, zeroed)
> != VMDK_OK) {
> return -EIO;
> }
> @@ -1863,11 +1932,9 @@ static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
> if (ret) {
> return ret;
> }
> - if (m_data.valid) {
> + if (m_data->valid) {
> /* update L2 tables */
> - if (vmdk_L2update(extent, &m_data,
> - cluster_offset >> BDRV_SECTOR_BITS)
> - != VMDK_OK) {
> + if (vmdk_L2update(extent, m_data, zeroed) != VMDK_OK) {
> return -EIO;
> }
> }
> @@ -1876,6 +1943,13 @@ static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
> offset += n_bytes;
> bytes_done += n_bytes;
>
> + while (m_data->next != NULL) {
> + VmdkMetaData *next;
> + next = m_data->next;
> + g_free(m_data);
> + m_data = next;
> + }
> +
> /* update CID on the first write every time the virtual disk is
> * opened */
> if (!s->cid_updated) {
> @@ -1886,6 +1960,7 @@ static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
> s->cid_updated = true;
> }
> }
> + g_free(m_data);
> return 0;
> }
>
> --
> 2.6.2
>
>
Fam