From: Fam Zheng <famz@redhat.com>
To: Ashijeet Acharya <ashijeetacharya@gmail.com>
Cc: kwolf@redhat.com, jsnow@redhat.com, mreitz@redhat.com,
	stefanha@gmail.com, qemu-devel@nongnu.org, qemu-block@nongnu.org
Subject: Re: [Qemu-devel] [PATCH v6 7/8] vmdk: Update metadata for multiple clusters
Date: Tue, 27 Jun 2017 16:14:00 +0800
Message-ID: <20170627081400.GD14166@lemon.lan>
In-Reply-To: <1496649172-26982-8-git-send-email-ashijeetacharya@gmail.com>

On Mon, 06/05 13:22, Ashijeet Acharya wrote:
> Include a next pointer in the VmdkMetaData struct to point to the previously
> allocated L2 table. Modify vmdk_L2update to update the metadata for an
> allocation of multiple clusters at once.
> 
> Signed-off-by: Ashijeet Acharya <ashijeetacharya@gmail.com>
> ---
>  block/vmdk.c | 128 ++++++++++++++++++++++++++++++++++++++++++++++-------------
>  1 file changed, 101 insertions(+), 27 deletions(-)
> 
> diff --git a/block/vmdk.c b/block/vmdk.c
> index b671dc9..9fa2414 100644
> --- a/block/vmdk.c
> +++ b/block/vmdk.c
> @@ -137,6 +137,8 @@ typedef struct VmdkMetaData {
>      int valid;
>      uint32_t *l2_cache_entry;
>      uint32_t nb_clusters;
> +    uint32_t offset;
> +    struct VmdkMetaData *next;
>  } VmdkMetaData;
>  
>  typedef struct VmdkGrainMarker {
> @@ -1116,34 +1118,87 @@ exit:
>      return ret;
>  }
>  
> -static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
> -                         uint32_t offset)
> +static int vmdk_alloc_cluster_link_l2(VmdkExtent *extent,
> +                                      VmdkMetaData *m_data, bool zeroed)
>  {
> -    offset = cpu_to_le32(offset);
> +    int i;
> +    uint32_t offset, temp_offset;
> +    int *l2_table_array;
> +    int l2_array_size;
> +
> +    if (zeroed) {
> +        temp_offset = VMDK_GTE_ZEROED;
> +    } else {
> +        temp_offset = m_data->offset;
> +    }
> +
> +    l2_array_size = sizeof(uint32_t) * m_data->nb_clusters;
> +    l2_table_array = qemu_try_blockalign(extent->file->bs,
> +                                         QEMU_ALIGN_UP(l2_array_size,
> +                                                       BDRV_SECTOR_SIZE));
> +    if (l2_table_array == NULL) {
> +        return VMDK_ERROR;
> +    }
> +    memset(l2_table_array, 0, QEMU_ALIGN_UP(l2_array_size, BDRV_SECTOR_SIZE));
>      /* update L2 table */
> +    offset = temp_offset;
> +    for (i = 0; i < m_data->nb_clusters; i++) {
> +        l2_table_array[i] = cpu_to_le32(offset);
> +        if (!zeroed) {
> +            offset += 128;

s/128/extent->cluster_sectors/?
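
Something like this (an untested sketch of the suggested substitution), so the
entry value advances by one cluster's worth of sectors instead of a magic
number:

    for (i = 0; i < m_data->nb_clusters; i++) {
        l2_table_array[i] = cpu_to_le32(offset);
        if (!zeroed) {
            offset += extent->cluster_sectors;
        }
    }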

> +        }
> +    }
>      if (bdrv_pwrite_sync(extent->file,
> -                ((int64_t)m_data->l2_offset * 512)
> -                    + (m_data->l2_index * sizeof(offset)),
> -                &offset, sizeof(offset)) < 0) {
> +                         ((int64_t)m_data->l2_offset * 512)
> +                         + ((m_data->l2_index) * sizeof(offset)),
> +                         l2_table_array, l2_array_size) < 0) {
>          return VMDK_ERROR;
>      }
>      /* update backup L2 table */
>      if (extent->l1_backup_table_offset != 0) {
>          m_data->l2_offset = extent->l1_backup_table[m_data->l1_index];
>          if (bdrv_pwrite_sync(extent->file,
> -                    ((int64_t)m_data->l2_offset * 512)
> -                        + (m_data->l2_index * sizeof(offset)),
> -                    &offset, sizeof(offset)) < 0) {
> +                             ((int64_t)m_data->l2_offset * 512)
> +                             + ((m_data->l2_index) * sizeof(offset)),
> +                             l2_table_array, l2_array_size) < 0) {
>              return VMDK_ERROR;
>          }
>      }
> +
> +    offset = temp_offset;
>      if (m_data->l2_cache_entry) {
> -        *m_data->l2_cache_entry = offset;
> +        for (i = 0; i < m_data->nb_clusters; i++) {
> +            *m_data->l2_cache_entry = cpu_to_le32(offset);
> +            m_data->l2_cache_entry++;
> +
> +            if (!zeroed) {
> +                offset += 128;

Same here: s/128/extent->cluster_sectors/.

> +            }
> +        }
>      }
>  
> +    qemu_vfree(l2_table_array);
>      return VMDK_OK;
>  }
>  
> +static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
> +                         bool zeroed)
> +{
> +    int ret;
> +
> +    while (m_data->next != NULL) {
> +
> +        ret = vmdk_alloc_cluster_link_l2(extent, m_data, zeroed);
> +        if (ret < 0) {
> +            return ret;
> +        }
> +
> +        m_data = m_data->next;
> +     }
> +
> +     return VMDK_OK;
> +}
> +
>  /*
>   * vmdk_l2load
>   *
> @@ -1261,9 +1316,10 @@ static int get_cluster_table(VmdkExtent *extent, uint64_t offset,
>   *
>   *   VMDK_ERROR:    in error cases
>   */
> +
>  static int vmdk_handle_alloc(BlockDriverState *bs, VmdkExtent *extent,
>                               uint64_t offset, uint64_t *cluster_offset,
> -                             int64_t *bytes, VmdkMetaData *m_data,
> +                             int64_t *bytes, VmdkMetaData **m_data,
>                               bool allocate, uint32_t *alloc_clusters_counter)
>  {
>      int l1_index, l2_offset, l2_index;
> @@ -1272,6 +1328,7 @@ static int vmdk_handle_alloc(BlockDriverState *bs, VmdkExtent *extent,
>      uint32_t nb_clusters;
>      bool zeroed = false;
>      uint64_t skip_start_bytes, skip_end_bytes;
> +    VmdkMetaData *old_m_data;
>      int ret;
>  
>      ret = get_cluster_table(extent, offset, &l1_index, &l2_offset,
> @@ -1323,13 +1380,21 @@ static int vmdk_handle_alloc(BlockDriverState *bs, VmdkExtent *extent,
>          if (ret < 0) {
>              return ret;
>          }
> -        if (m_data) {
> -            m_data->valid = 1;
> -            m_data->l1_index = l1_index;
> -            m_data->l2_index = l2_index;
> -            m_data->l2_offset = l2_offset;
> -            m_data->l2_cache_entry = &l2_table[l2_index];
> -            m_data->nb_clusters = nb_clusters;
> +
> +        if (*m_data) {

Please check m_data != NULL if it can actually be NULL here; otherwise assert it.
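
For example (a rough sketch; which variant is right depends on the invariant
the callers actually guarantee):

    if (m_data && *m_data) {
        ...
    }

or, if a NULL m_data is impossible here, assert it before the dereference:

    assert(m_data);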

> +            old_m_data = *m_data;
> +            *m_data = g_malloc0(sizeof(**m_data));
> +
> +            **m_data = (VmdkMetaData) {
> +                .valid            =    1,
> +                .l1_index         =    l1_index,
> +                .l2_index         =    l2_index,
> +                .l2_offset        =    l2_offset,
> +                .l2_cache_entry   =    &l2_table[l2_index],
> +                .nb_clusters      =    nb_clusters,
> +                .offset           =    cluster_sector,
> +                .next             =    old_m_data,
> +            };
>          }
>      }
>      *cluster_offset = cluster_sector << BDRV_SECTOR_BITS;
> @@ -1792,10 +1857,12 @@ static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
>      int64_t offset_in_cluster, n_bytes;
>      uint64_t cluster_offset;
>      uint64_t bytes_done = 0;
> -    VmdkMetaData m_data;
>      uint64_t extent_end;
> +    VmdkMetaData *m_data;
>      uint32_t total_alloc_clusters = 0;
>  
> +    m_data = g_malloc0(sizeof(*m_data));

You can use "g_new0(VmdkMetaData, 1)" here, which is more readable.
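
That is, a one-line sketch of the suggested change:

    m_data = g_new0(VmdkMetaData, 1);

It also keeps the allocated element type visible at the call site.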

> +
>      if (DIV_ROUND_UP(offset, BDRV_SECTOR_SIZE) > bs->total_sectors) {
>          error_report("Wrong offset: offset=0x%" PRIx64
>                       " total_sectors=0x%" PRIx64,

Fam
