From: Jun Li <juli@redhat.com>
To: kwolf@redhat.com, stefanha@redhat.com
Cc: qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH] qcow2: Add support for shrinking qcow2 disk images
Date: Tue, 1 Apr 2014 10:06:26 -0400 (EDT)
Message-ID: <1040665119.526014.1396361186516.JavaMail.zimbra@redhat.com>

This patch preserves existing data when shrinking a qcow2 image and only
discards clusters that are not used by the guest. If the requested size would
truncate clusters that still hold data, the operation returns an error and
leaves the image unchanged.

Since the function now supports shrinking the L1 table as well as growing it,
qcow2_grow_l1_table is renamed to qcow2_truncate_l1_table.

Signed-off-by: Jun Li <junmuzi@gmail.com>
---
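For reference, below is a minimal, self-contained sketch (not QEMU code; the
function name l1_can_shrink and the sample table are made up for illustration)
of the kind of check the shrink path relies on: an image may only be shrunk if
every L1 entry that would be dropped is zero, i.e. no L2 table, and therefore
no guest data, lives in the truncated range. The sketch scans the whole
dropped range, which is a stricter variant of the single-entry test used in
the hunk below.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Return 0 if an L1 table with old_size entries can be truncated to
     * new_size entries without losing data (every dropped entry is
     * unallocated), or -1 if any dropped entry still points to an L2 table.
     */
    static int l1_can_shrink(const uint64_t *l1_table, int old_size,
                             int new_size)
    {
        for (int i = new_size; i < old_size; i++) {
            if (l1_table[i] != 0) {
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        /* hypothetical 8-entry L1 table: entries 0-3 allocated, 4-7 free */
        uint64_t l1[8] = { 0x10000, 0x20000, 0x30000, 0x40000, 0, 0, 0, 0 };

        printf("shrink to 4 entries: %s\n",
               l1_can_shrink(l1, 8, 4) == 0 ? "ok" : "would destroy data");
        printf("shrink to 2 entries: %s\n",
               l1_can_shrink(l1, 8, 2) == 0 ? "ok" : "would destroy data");
        return 0;
    }
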
 block/qcow2-cluster.c  | 20 ++++++++++++++------
 block/qcow2-snapshot.c |  2 +-
 block/qcow2.c          |  8 +-------
 block/qcow2.h          |  2 +-
 4 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index 9499df9..70e61ea 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -29,7 +29,7 @@
 #include "block/qcow2.h"
 #include "trace.h"
 
-int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
+int qcow2_truncate_l1_table(BlockDriverState *bs, uint64_t min_size,
                         bool exact_size)
 {
     BDRVQcowState *s = bs->opaque;
@@ -39,9 +39,6 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
     int64_t new_l1_table_offset, new_l1_size;
     uint8_t data[12];
 
-    if (min_size <= s->l1_size)
-        return 0;
-
     if (exact_size) {
         new_l1_size = min_size;
     } else {
@@ -66,7 +63,18 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
 
     new_l1_size2 = sizeof(uint64_t) * new_l1_size;
     new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
-    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
+
+    /* shrinking the image */
+    if (new_l1_size < s->l1_size) {
+        if (s->l1_table[new_l1_size] != 0) {
+            error_report("Could not shrink to this size, "
+                         "it would destroy image data");
+            return -ENOTSUP;
+        }
+        memcpy(new_l1_table, s->l1_table, new_l1_size2);
+    } else {
+        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
+    }
 
     /* write new table (align to cluster) */
     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
@@ -559,7 +567,7 @@ static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
 
     l1_index = offset >> (s->l2_bits + s->cluster_bits);
     if (l1_index >= s->l1_size) {
-        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
+        ret = qcow2_truncate_l1_table(bs, l1_index + 1, false);
         if (ret < 0) {
             return ret;
         }
diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c
index 2fc6320..ab16c52 100644
--- a/block/qcow2-snapshot.c
+++ b/block/qcow2-snapshot.c
@@ -491,7 +491,7 @@ int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id)
      * L1 table of the snapshot. If the snapshot L1 table is smaller, the
      * current one must be padded with zeros.
      */
-    ret = qcow2_grow_l1_table(bs, sn->l1_size, true);
+    ret = qcow2_truncate_l1_table(bs, sn->l1_size, true);
     if (ret < 0) {
         goto fail;
     }
diff --git a/block/qcow2.c b/block/qcow2.c
index b9dc960..4797879 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -1764,14 +1764,8 @@ static int qcow2_truncate(BlockDriverState *bs, int64_t offset)
         return -ENOTSUP;
     }
 
-    /* shrinking is currently not supported */
-    if (offset < bs->total_sectors * 512) {
-        error_report("qcow2 doesn't support shrinking images yet");
-        return -ENOTSUP;
-    }
-
     new_l1_size = size_to_l1(s, offset);
-    ret = qcow2_grow_l1_table(bs, new_l1_size, true);
+    ret = qcow2_truncate_l1_table(bs, new_l1_size, true);
     if (ret < 0) {
         return ret;
     }
diff --git a/block/qcow2.h b/block/qcow2.h
index 0b0eac8..298d84e 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -455,7 +455,7 @@ int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
                                   int64_t size);
 
 /* qcow2-cluster.c functions */
-int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
+int qcow2_truncate_l1_table(BlockDriverState *bs, uint64_t min_size,
                         bool exact_size);
 int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index);
 void qcow2_l2_cache_reset(BlockDriverState *bs);
-- 
1.8.3.1
