qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Fam Zheng <famz@redhat.com>
To: Ashijeet Acharya <ashijeetacharya@gmail.com>
Cc: Kevin Wolf <kwolf@redhat.com>, qemu block <qemu-block@nongnu.org>,
	Stefan Hajnoczi <stefanha@gmail.com>,
	QEMU Developers <qemu-devel@nongnu.org>,
	Max Reitz <mreitz@redhat.com>, John Snow <jsnow@redhat.com>
Subject: Re: [Qemu-devel] [PATCH v2 7/7] vmdk: Update metadata for multiple clusters
Date: Fri, 31 Mar 2017 17:08:06 +0800	[thread overview]
Message-ID: <20170331090806.GH11195@lemon.lan> (raw)
In-Reply-To: <CAC2QTZaL2aDatNoKoxEWn0XYxtfD6RDpB9gWn9h4+QE0vg2mMw@mail.gmail.com>

On Fri, 03/31 14:17, Ashijeet Acharya wrote:
> On Fri, Mar 31, 2017 at 12:56 PM, Fam Zheng <famz@redhat.com> wrote:
> > On Sat, 03/25 16:48, Ashijeet Acharya wrote:
> >> Include a next pointer in VmdkMetaData struct to point to the previous
> >> allocated L2 table. Modify vmdk_L2update to start updating metadata for
> >> allocation of multiple clusters at once.
> >>
> >> Signed-off-by: Ashijeet Acharya <ashijeetacharya@gmail.com>
> >> ---
> >>  block/vmdk.c | 131 ++++++++++++++++++++++++++++++++++++++++++++++-------------
> >>  1 file changed, 102 insertions(+), 29 deletions(-)
> >>
> >> diff --git a/block/vmdk.c b/block/vmdk.c
> >> index 3de8b8f..4517409 100644
> >> --- a/block/vmdk.c
> >> +++ b/block/vmdk.c
> >> @@ -137,6 +137,8 @@ typedef struct VmdkMetaData {
> >>      int valid;
> >>      uint32_t *l2_cache_entry;
> >>      uint32_t nb_clusters;
> >> +    uint32_t offset;
> >> +    struct VmdkMetaData *next;
> >>  } VmdkMetaData;
> >>
> >>  typedef struct VmdkGrainMarker {
> >> @@ -1037,29 +1039,81 @@ static void vmdk_refresh_limits(BlockDriverState *bs, Error **errp)
> >>      }
> >>  }
> >>
> >> -static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
> >> -                         uint32_t offset)
> >> +static int vmdk_alloc_cluster_link_l2(VmdkExtent *extent,
> >> +                                      VmdkMetaData *m_data, bool zeroed)
> >>  {
> >> -    offset = cpu_to_le32(offset);
> >> +    int i;
> >> +    uint32_t offset, temp_offset;
> >> +
> >> +    if (zeroed) {
> >> +        temp_offset = VMDK_GTE_ZEROED;
> >> +    } else {
> >> +        temp_offset = m_data->offset;
> >> +    }
> >> +
> >> +    temp_offset = cpu_to_le32(temp_offset);
> >> +
> >>      /* update L2 table */
> >> -    if (bdrv_pwrite_sync(extent->file,
> >> +    offset = temp_offset;
> >> +    for (i = 0; i < m_data->nb_clusters; i++) {
> >> +        if (bdrv_pwrite_sync(extent->file,
> >>                  ((int64_t)m_data->l2_offset * 512)
> >> -                    + (m_data->l2_index * sizeof(offset)),
> >> -                &offset, sizeof(offset)) < 0) {
> >> -        return VMDK_ERROR;
> >> +                    + ((m_data->l2_index + i) * sizeof(offset)),
> >> +                &(offset), sizeof(offset)) < 0) {
> >> +            return VMDK_ERROR;
> >> +        }
> >> +        if (!zeroed) {
> >> +            offset += 128;
> >> +        }
> >>      }
> >> +
> >>      /* update backup L2 table */
> >> +    offset = temp_offset;
> >>      if (extent->l1_backup_table_offset != 0) {
> >>          m_data->l2_offset = extent->l1_backup_table[m_data->l1_index];
> >> -        if (bdrv_pwrite_sync(extent->file,
> >> -                    ((int64_t)m_data->l2_offset * 512)
> >> -                        + (m_data->l2_index * sizeof(offset)),
> >> -                    &offset, sizeof(offset)) < 0) {
> >> -            return VMDK_ERROR;
> >> +        for (i = 0; i < m_data->nb_clusters; i++) {
> >> +            if (bdrv_pwrite_sync(extent->file,
> >> +                        ((int64_t)m_data->l2_offset * 512)
> >> +                            + ((m_data->l2_index + i) * sizeof(offset)),
> >> +                        &(offset), sizeof(offset)) < 0) {
> >> +                return VMDK_ERROR;
> >> +            }
> >> +            if (!zeroed) {
> >> +                offset += 128;
> >> +            }
> >>          }
> >>      }
> >> +
> >> +    offset = temp_offset;
> >>      if (m_data->l2_cache_entry) {
> >> -        *m_data->l2_cache_entry = offset;
> >> +        for (i = 0; i < m_data->nb_clusters; i++) {
> >> +            *m_data->l2_cache_entry = offset;
> >> +            m_data->l2_cache_entry++;
> >> +
> >> +            if (!zeroed) {
> >> +                offset += 128;
> >> +            }
> >> +        }
> >> +    }
> >> +
> >> +    return VMDK_OK;
> >> +}
> >> +
> >> +static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
> >> +                         bool zeroed)
> >> +{
> >> +    int ret;
> >> +
> >> +    while (m_data->next != NULL) {
> >> +        VmdkMetaData *next;
> >> +
> >> +        ret = vmdk_alloc_cluster_link_l2(extent, m_data, zeroed);
> >> +        if (ret < 0) {
> >> +            return ret;
> >> +        }
> >> +
> >> +        next = m_data->next;
> >> +        m_data = next;
> >
> > I don't see why the next pointer is necessary.  Also the tail is always unused,
> > why do you need to allocate it?
> 
> If I don't allocate the tail, I was getting a segfault in vmdk_pwritev().

That may be because the way you iterate the linked list in vmdk_pwritev is:

>        while (m_data->next != NULL) {
>            VmdkMetaData *next;
>            next = m_data->next;
>            g_free(m_data);
>            m_data = next;
>        }
>

which does require a dummy tail.

But after all it's still not clear to me why you cannot keep m_data on stack,
and why you need the next pointer at all.

Fam

  reply	other threads:[~2017-03-31  9:08 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-03-25 11:18 [Qemu-devel] [PATCH v2 0/7] Optiomize VMDK I/O by allocating multiple clusters Ashijeet Acharya
2017-03-25 11:18 ` [Qemu-devel] [PATCH v2 1/7] vmdk: Refactor and introduce new helper functions Ashijeet Acharya
2017-03-31  5:52   ` Fam Zheng
2017-03-25 11:18 ` [Qemu-devel] [PATCH v2 2/7] vmdk: Rename get_whole_cluster() to vmdk_perform_cow() Ashijeet Acharya
2017-03-31  5:55   ` Fam Zheng
2017-03-25 11:18 ` [Qemu-devel] [PATCH v2 3/7] vmdk: Factor out metadata loading code out of get_cluster_offset() Ashijeet Acharya
2017-03-31  6:03   ` Fam Zheng
2017-04-01 13:22     ` Ashijeet Acharya
2017-03-25 11:18 ` [Qemu-devel] [PATCH v2 4/7] vmdk: New functions to allocate multiple clusters and cluster offset Ashijeet Acharya
2017-03-25 11:18 ` [Qemu-devel] [PATCH v2 5/7] vmdk: Rename get_cluster_offset() to vmdk_get_cluster_offset() Ashijeet Acharya
2017-03-25 11:18 ` [Qemu-devel] [PATCH v2 6/7] vmdk: Allocate multiple clusters at once Ashijeet Acharya
2017-03-25 11:18 ` [Qemu-devel] [PATCH v2 7/7] vmdk: Update metadata for multiple clusters Ashijeet Acharya
2017-03-31  7:26   ` Fam Zheng
2017-03-31  8:47     ` Ashijeet Acharya
2017-03-31  9:08       ` Fam Zheng [this message]
2017-03-31  9:41         ` Ashijeet Acharya

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170331090806.GH11195@lemon.lan \
    --to=famz@redhat.com \
    --cc=ashijeetacharya@gmail.com \
    --cc=jsnow@redhat.com \
    --cc=kwolf@redhat.com \
    --cc=mreitz@redhat.com \
    --cc=qemu-block@nongnu.org \
    --cc=qemu-devel@nongnu.org \
    --cc=stefanha@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).