qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
To: qemu-block@nongnu.org, qemu-devel@nongnu.org
Cc: kwolf@redhat.com, mreitz@redhat.com, jsnow@redhat.com,
	famz@redhat.com, den@openvz.org, stefanha@redhat.com,
	vsementsov@virtuozzo.com, pbonzini@redhat.com, jcody@redhat.com
Subject: [Qemu-devel] [PATCH 06/21] backup: rewrite top mode cluster skipping
Date: Fri, 23 Dec 2016 17:28:49 +0300	[thread overview]
Message-ID: <1482503344-6424-7-git-send-email-vsementsov@virtuozzo.com> (raw)
In-Reply-To: <1482503344-6424-1-git-send-email-vsementsov@virtuozzo.com>

TOP backup mode skips unallocated clusters. This patch marks skipped
clusters in copy_bitmap to prevent them from being written by the write
notifier (however, they may be written before skipping, but that is not
critical).

Also, update job->common.offset appropriately, so that it eventually
reaches job->common.len.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 block/backup.c | 135 ++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 104 insertions(+), 31 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index 938b7df..e2b944a 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -53,6 +53,11 @@ static inline int64_t cluster_size_sectors(BackupBlockJob *job)
   return job->cluster_size / BDRV_SECTOR_SIZE;
 }
 
+static inline int64_t max_query_sectors(BackupBlockJob *job)
+{
+    return (INT_MAX & ~(job->cluster_size - 1)) >> BDRV_SECTOR_BITS;
+}
+
 /* See if in-flight requests overlap and wait for them to complete */
 static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                        int64_t start,
@@ -374,6 +379,101 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
     return false;
 }
 
+static void backup_skip_clusters(BackupBlockJob *job,
+                                 int64_t start, int64_t end)
+{
+    CowRequest cow_request;
+
+    wait_for_overlapping_requests(job, start, end);
+    cow_request_begin(&cow_request, job, start, end);
+
+    if (end * job->cluster_size > job->common.len) {
+        int64_t n;
+        end--;
+        n = job->common.len - end * job->cluster_size;
+        assert(n > 0);
+
+        if (hbitmap_get(job->copy_bitmap, end)) {
+            hbitmap_reset(job->copy_bitmap, end, 1);
+            job->common.offset += n;
+        }
+    }
+
+    for ( ; start < end; start++) {
+        if (!hbitmap_get(job->copy_bitmap, start)) {
+            continue;
+        }
+
+        hbitmap_reset(job->copy_bitmap, start, 1);
+        job->common.offset += job->cluster_size;
+    }
+
+    cow_request_end(&cow_request);
+}
+
+static int backup_skip_unallocated_clusters(BackupBlockJob *job,
+                                            BlockDriverState *base,
+                                            int64_t start, int *n)
+{
+    int ret;
+    int64_t sectors_per_cluster = cluster_size_sectors(job);
+    BlockDriverState *bs = blk_bs(job->common.blk);
+    int64_t sector_end = job->common.len >> BDRV_SECTOR_BITS;
+    int64_t sector = start * sectors_per_cluster;
+    int max_sectors = MIN(max_query_sectors(job), sector_end - sector);
+    int n_sectors = 0;
+
+    ret = bdrv_is_allocated_above(bs, base, sector, max_sectors, &n_sectors);
+    if (ret < 0) {
+        return ret;
+    }
+
+    if (sector + n_sectors == sector_end || ret == 1) {
+        *n = DIV_ROUND_UP(n_sectors, sectors_per_cluster);
+    } else if (n_sectors < sectors_per_cluster) {
+        *n = 1;
+        ret = 1;
+    } else {
+        *n = n_sectors / sectors_per_cluster;
+    }
+
+    if (ret == 0) {
+        backup_skip_clusters(job, start, start + *n);
+    }
+
+    return 0;
+}
+
+static void backup_skip_loop(BackupBlockJob *job, BlockDriverState *base)
+{
+    HBitmapIter hbi;
+    int64_t cluster;
+    int64_t end = DIV_ROUND_UP(job->common.len, job->cluster_size);
+
+    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
+    while ((cluster = hbitmap_iter_next(&hbi)) != -1) {
+        int n, ret;
+
+        if (yield_and_check(job)) {
+            return;
+        }
+
+        ret = backup_skip_unallocated_clusters(job, base, cluster, &n);
+        if (ret < 0) {
+            n = 1;
+        }
+
+        cluster += n;
+        if (cluster >= end) {
+            return;
+        }
+
+        if (n > 1) {
+            hbitmap_iter_init(&hbi, job->copy_bitmap, cluster);
+        }
+    }
+}
+
 static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
 {
     int ret;
@@ -465,6 +565,10 @@ static void coroutine_fn backup_run(void *opaque)
         ret = backup_run_incremental(job);
     } else {
         hbitmap_set(job->copy_bitmap, 0, end);
+        if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
+            backup_skip_loop(job, backing_bs(blk_bs(job->common.blk)));
+        }
+
         /* Both FULL and TOP SYNC_MODE's require copying.. */
         for (; start < end; start++) {
             bool error_is_read;
@@ -472,37 +576,6 @@ static void coroutine_fn backup_run(void *opaque)
                 break;
             }
 
-            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
-                int i, n;
-                int alloced = 0;
-
-                /* Check to see if these blocks are already in the
-                 * backing file. */
-
-                for (i = 0; i < sectors_per_cluster;) {
-                    /* bdrv_is_allocated() only returns true/false based
-                     * on the first set of sectors it comes across that
-                     * are are all in the same state.
-                     * For that reason we must verify each sector in the
-                     * backup cluster length.  We end up copying more than
-                     * needed but at some point that is always the case. */
-                    alloced =
-                        bdrv_is_allocated(bs,
-                                start * sectors_per_cluster + i,
-                                sectors_per_cluster - i, &n);
-                    i += n;
-
-                    if (alloced == 1 || n == 0) {
-                        break;
-                    }
-                }
-
-                /* If the above loop never found any sectors that are in
-                 * the topmost image, skip this backup. */
-                if (alloced == 0) {
-                    continue;
-                }
-            }
             /* FULL sync mode we copy the whole drive. */
             ret = backup_do_cow(job, start * sectors_per_cluster,
                                 sectors_per_cluster, &error_is_read, false);
-- 
1.8.3.1

  parent reply	other threads:[~2016-12-23 17:02 UTC|newest]

Thread overview: 59+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-12-23 14:28 [Qemu-devel] [PATCH 00/21] new backup architecture Vladimir Sementsov-Ogievskiy
2016-12-23 14:28 ` [Qemu-devel] [PATCH 01/21] backup: move from done_bitmap to copy_bitmap Vladimir Sementsov-Ogievskiy
2017-01-23  5:34   ` Jeff Cody
2017-01-23 12:20   ` Vladimir Sementsov-Ogievskiy
2017-01-31 10:25   ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 02/21] backup: init copy_bitmap from sync_bitmap for incremental Vladimir Sementsov-Ogievskiy
2017-01-24  7:09   ` Fam Zheng
2017-01-24  9:00     ` Vladimir Sementsov-Ogievskiy
2017-01-24  9:46       ` Fam Zheng
2017-01-24 10:16         ` Vladimir Sementsov-Ogievskiy
2017-01-31 10:36   ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 03/21] backup: improve non-dirty bits progress processing Vladimir Sementsov-Ogievskiy
2017-01-24  7:17   ` Fam Zheng
2017-01-24  9:12     ` Vladimir Sementsov-Ogievskiy
2017-01-31 10:56       ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 04/21] backup: use copy_bitmap in incremental backup Vladimir Sementsov-Ogievskiy
2017-01-31 11:01   ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 05/21] hbitmap: improve dirty iter Vladimir Sementsov-Ogievskiy
2017-01-31 11:20   ` Stefan Hajnoczi
2017-01-31 11:29   ` Stefan Hajnoczi
2016-12-23 14:28 ` Vladimir Sementsov-Ogievskiy [this message]
2017-01-31 13:31   ` [Qemu-devel] [PATCH 06/21] backup: rewrite top mode cluster skipping Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 07/21] backup: refactor: merge top/full/incremental backup code Vladimir Sementsov-Ogievskiy
2017-01-31 14:26   ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 08/21] backup: skip unallocated clusters for full mode Vladimir Sementsov-Ogievskiy
2017-01-24  7:59   ` Fam Zheng
2017-01-24  9:18     ` Vladimir Sementsov-Ogievskiy
2017-01-24  9:36       ` Fam Zheng
2017-01-24 10:13         ` Vladimir Sementsov-Ogievskiy
2017-01-24 11:12           ` Fam Zheng
2017-01-31 14:33   ` Stefan Hajnoczi
2017-01-31 14:38   ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 09/21] backup: separate copy function Vladimir Sementsov-Ogievskiy
2017-01-31 14:40   ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 10/21] backup: refactor backup_copy_cluster() Vladimir Sementsov-Ogievskiy
2017-01-31 14:57   ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 11/21] backup: move r/w error handling code to r/w functions Vladimir Sementsov-Ogievskiy
2017-01-31 14:57   ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 12/21] iotests: add supported_cache_modes to main function Vladimir Sementsov-Ogievskiy
2017-01-31 14:58   ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 13/21] coroutine: add qemu_coroutine_add_next Vladimir Sementsov-Ogievskiy
2017-01-31 15:03   ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 14/21] block: add trace point on bdrv_close_all Vladimir Sementsov-Ogievskiy
2017-01-31 15:03   ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 15/21] bitmap: add bitmap_count_between() function Vladimir Sementsov-Ogievskiy
2017-01-31 15:15   ` Stefan Hajnoczi
2016-12-23 14:28 ` [Qemu-devel] [PATCH 16/21] hbitmap: add hbitmap_count_between() function Vladimir Sementsov-Ogievskiy
2017-01-31 15:56   ` Stefan Hajnoczi
2016-12-23 14:29 ` [Qemu-devel] [PATCH 17/21] backup: make all reads not serializing Vladimir Sementsov-Ogievskiy
2017-01-31 16:30   ` Stefan Hajnoczi
2016-12-23 14:29 ` [Qemu-devel] [PATCH 18/21] backup: new async architecture Vladimir Sementsov-Ogievskiy
2017-01-31 16:46   ` Stefan Hajnoczi
2017-02-01 16:13   ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2016-12-23 14:29 ` [Qemu-devel] [PATCH 20/21] backup: move bitmap handling from backup_do_cow to get_work Vladimir Sementsov-Ogievskiy
2016-12-23 14:29 ` [Qemu-devel] [PATCH 21/21] backup: refactor: remove backup_do_cow() Vladimir Sementsov-Ogievskiy
2017-01-09 11:04 ` [Qemu-devel] [PATCH 00/21] new backup architecture Stefan Hajnoczi
2017-01-10  6:05   ` Jeff Cody
2017-01-10 18:48     ` John Snow
2017-01-31 10:20 ` Stefan Hajnoczi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1482503344-6424-7-git-send-email-vsementsov@virtuozzo.com \
    --to=vsementsov@virtuozzo.com \
    --cc=den@openvz.org \
    --cc=famz@redhat.com \
    --cc=jcody@redhat.com \
    --cc=jsnow@redhat.com \
    --cc=kwolf@redhat.com \
    --cc=mreitz@redhat.com \
    --cc=pbonzini@redhat.com \
    --cc=qemu-block@nongnu.org \
    --cc=qemu-devel@nongnu.org \
    --cc=stefanha@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).