From: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
To: qemu-devel@nongnu.org
Cc: qemu-block@nongnu.org, den@virtuozzo.com, stefanha@redhat.com,
vsementsov@yandex-team.ru, kwolf@redhat.com, hreitz@redhat.com
Subject: [PATCH v5 11/22] parallels: Add dirty bitmaps saving
Date: Mon, 11 Mar 2024 19:18:39 +0100
Message-ID: <20240311181850.73013-12-alexander.ivanov@virtuozzo.com>
In-Reply-To: <20240311181850.73013-1-alexander.ivanov@virtuozzo.com>
Dirty bitmaps can now be loaded, but there is no way to save them yet. Add
code to store persistent dirty bitmaps in the format extension on image
inactivation.
Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
---
block/parallels-ext.c | 187 ++++++++++++++++++++++++++++++++++++++++++
block/parallels.c | 16 +++-
block/parallels.h | 5 ++
3 files changed, 206 insertions(+), 2 deletions(-)
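
A reviewer-oriented sketch of the on-disk layout this patch produces (struct
names are the ones the existing loading path in block/parallels-ext.c already
uses; the zeroed terminator is implied by the zero-initialized cluster buffer
and the "final section" space check in parallels_save_bitmap()):

    /*
     * Format extension cluster, as written by
     * parallels_store_persistent_dirty_bitmaps():
     *
     *   ParallelsFormatExtensionHeader   magic plus an MD5 checksum of the
     *                                    rest of the cluster
     *   for each persistent dirty bitmap:
     *     ParallelsFeatureHeader         dirty bitmap feature magic and
     *                                    data_size
     *     ParallelsDirtyBitmapFeature    size, UUID id, granularity, l1_size
     *     uint64_t l1_table[l1_size]     bitmap data cluster offsets in
     *                                    sectors (0 = no data cluster)
     *   zeroed ParallelsFeatureHeader    terminates the feature list
     */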
diff --git a/block/parallels-ext.c b/block/parallels-ext.c
index b4e14c88f2..9cb8e65c0d 100644
--- a/block/parallels-ext.c
+++ b/block/parallels-ext.c
@@ -24,6 +24,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "block/block-io.h"
#include "block/block_int.h"
@@ -300,3 +301,189 @@ out:
     return ret;
 }
 
+
+static void GRAPH_RDLOCK parallels_save_bitmap(BlockDriverState *bs,
+                                               BdrvDirtyBitmap *bitmap,
+                                               uint8_t **buf, int *buf_size)
+{
+    BDRVParallelsState *s = bs->opaque;
+    ParallelsFeatureHeader *fh;
+    ParallelsDirtyBitmapFeature *bh;
+    uint64_t *l1_table, l1_size, granularity, bits_per_cluster;
+    int64_t bm_size, ser_size, offset, buf_used;
+    int64_t idx, alloc_size = 1;
+    const char *name;
+    uint8_t *bm_buf;
+    QemuUUID uuid;
+    int ret = 0;
+
+    if (!bdrv_dirty_bitmap_get_persistence(bitmap) ||
+        bdrv_dirty_bitmap_inconsistent(bitmap)) {
+        return;
+    }
+
+    bm_size = bdrv_dirty_bitmap_size(bitmap);
+    granularity = bdrv_dirty_bitmap_granularity(bitmap);
+    bits_per_cluster = bdrv_dirty_bitmap_serialization_coverage(s->cluster_size,
+                                                                bitmap);
+    ser_size = bdrv_dirty_bitmap_serialization_size(bitmap, 0, bm_size);
+    l1_size = DIV_ROUND_UP(ser_size, s->cluster_size);
+
+    buf_used = l1_size * 8 + sizeof(*fh) + sizeof(*bh);
+    /* Check that there is enough space left for the final (zeroed) section */
+    if (*buf_size - buf_used < (int64_t)sizeof(*fh)) {
+        error_report("Can't save dirty bitmap: not enough space");
+        return;
+    }
+
+    name = bdrv_dirty_bitmap_name(bitmap);
+    ret = qemu_uuid_parse(name, &uuid);
+    if (ret < 0) {
+        error_report("Can't save dirty bitmap '%s': ID parsing error", name);
+        return;
+    }
+
+    fh = (ParallelsFeatureHeader *)*buf;
+    bh = (ParallelsDirtyBitmapFeature *)(fh + 1);
+    l1_table = (uint64_t *)(bh + 1);
+
+    fh->magic = cpu_to_le64(PARALLELS_DIRTY_BITMAP_FEATURE_MAGIC);
+    fh->data_size = cpu_to_le32(l1_size * 8 + sizeof(*bh));
+
+    bh->l1_size = cpu_to_le32(l1_size);
+    bh->size = cpu_to_le64(bm_size >> BDRV_SECTOR_BITS);
+    bh->granularity = cpu_to_le32(granularity >> BDRV_SECTOR_BITS);
+    memcpy(bh->id, &uuid, sizeof(uuid));
+
+    bm_buf = qemu_blockalign(bs, s->cluster_size);
+
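+    /*
+     * Walk the dirty regions of the bitmap, serializing one cluster worth
+     * of bits at a time and storing it in a freshly allocated host cluster.
+     */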
+    offset = 0;
+    while ((offset = bdrv_dirty_bitmap_next_dirty(bitmap, offset,
+                                                  bm_size)) >= 0) {
+        int64_t cluster_off, end, write_size;
+
+        idx = offset / bits_per_cluster;
+        offset = QEMU_ALIGN_DOWN(offset, bits_per_cluster);
+        end = MIN(bm_size, offset + bits_per_cluster);
+        write_size = bdrv_dirty_bitmap_serialization_size(bitmap, offset,
+                                                          end - offset);
+        assert(write_size <= s->cluster_size);
+
+        bdrv_dirty_bitmap_serialize_part(bitmap, bm_buf, offset, end - offset);
+        if (write_size < s->cluster_size) {
+            memset(bm_buf + write_size, 0, s->cluster_size - write_size);
+        }
+
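+        /*
+         * Zero the L1 entry before allocating, so that the cleanup path
+         * below skips it if the allocation fails.
+         */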
+        l1_table[idx] = 0;
+
+        cluster_off = parallels_allocate_host_clusters(bs, &alloc_size);
+        if (cluster_off <= 0) {
+            error_report("Can't save dirty bitmap '%s': cluster allocation error",
+                         name);
+            goto clean_allocated_clusters;
+        }
+
+        l1_table[idx] = cpu_to_le64(cluster_off >> BDRV_SECTOR_BITS);
+
+        ret = bdrv_pwrite(bs->file, cluster_off, s->cluster_size, bm_buf, 0);
+        if (ret < 0) {
+            error_report("Can't save dirty bitmap '%s': write error", name);
+            goto clean_allocated_clusters;
+        }
+
+        offset = end;
+    }
+
+    *buf_size -= buf_used;
+    *buf += buf_used;
+    qemu_vfree(bm_buf);
+    return;
+
+clean_allocated_clusters:
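+    /*
+     * Zero the feature magic so that a loader can never see L1 entries
+     * pointing to clusters we are about to release. Entries equal to 0 or 1
+     * do not reference a host cluster and are skipped.
+     */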
+    memset(&fh->magic, 0, sizeof(fh->magic));
+    for (; idx >= 0; idx--) {
+        uint64_t cluster_off = le64_to_cpu(l1_table[idx]);
+
+        if (cluster_off == 0 || cluster_off == 1) {
+            continue;
+        }
+        parallels_mark_unused(bs, s->used_bmap, s->used_bmap_size,
+                              cluster_off << BDRV_SECTOR_BITS, 1);
+    }
+    qemu_vfree(bm_buf);
+}
+
+int GRAPH_RDLOCK
+parallels_store_persistent_dirty_bitmaps(BlockDriverState *bs, Error **errp)
+{
+    BDRVParallelsState *s = bs->opaque;
+    BdrvDirtyBitmap *bitmap;
+    ParallelsFormatExtensionHeader *eh;
+    int remaining = s->cluster_size;
+    uint8_t *buf, *pos;
+    int64_t header_off, alloc_size = 1;
+    g_autofree uint8_t *hash = NULL;
+    size_t hash_len = 0;
+    int ret;
+
+    s->header->ext_off = 0;
+
+    if (!bdrv_has_named_bitmaps(bs)) {
+        return 0;
+    }
+
+    buf = qemu_blockalign0(bs, s->cluster_size);
+
+    eh = (ParallelsFormatExtensionHeader *)buf;
+    pos = buf + sizeof(*eh);
+    remaining -= sizeof(*eh);
+
+    eh->magic = cpu_to_le64(PARALLELS_FORMAT_EXTENSION_MAGIC);
+
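+    /*
+     * Pack every persistent bitmap into the extension cluster. Each call
+     * advances pos and decreases remaining by the amount of space it used.
+     */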
+    FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
+        parallels_save_bitmap(bs, bitmap, &pos, &remaining);
+    }
+
+    header_off = parallels_allocate_host_clusters(bs, &alloc_size);
+    if (header_off < 0) {
+        error_setg(errp, "Can't save dirty bitmap: cluster allocation error");
+        ret = header_off;
+        goto end;
+    }
+
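+    /*
+     * The extension header carries an MD5 checksum of the cluster contents
+     * that follow it; compute the checksum before writing the cluster out.
+     */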
+    ret = qcrypto_hash_bytes(QCRYPTO_HASH_ALG_MD5,
+                             (const char *)(buf + sizeof(*eh)),
+                             s->cluster_size - sizeof(*eh),
+                             &hash, &hash_len, errp);
+    if (ret < 0) {
+        goto end;
+    }
+    if (hash_len != sizeof(eh->check_sum)) {
+        error_setg(errp, "Can't save dirty bitmap: unexpected hash size");
+        ret = -EINVAL;
+        goto end;
+    }
+    memcpy(eh->check_sum, hash, hash_len);
+
+    ret = bdrv_pwrite(bs->file, header_off, s->cluster_size, buf, 0);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret, "Can't save dirty bitmap: IO error");
+        parallels_mark_unused(bs, s->used_bmap, s->used_bmap_size,
+                              header_off, 1);
+        goto end;
+    }
+
+    s->header->ext_off = cpu_to_le64(header_off / BDRV_SECTOR_SIZE);
+
+end:
+    qemu_vfree(buf);
+    return ret;
+}
+
+bool coroutine_fn parallels_co_can_store_new_dirty_bitmap(BlockDriverState *bs,
+                                                          const char *name,
+                                                          uint32_t granularity,
+                                                          Error **errp)
+{
+    if (bdrv_find_dirty_bitmap(bs, name)) {
+        error_setg(errp, "Bitmap already exists: %s", name);
+        return false;
+    }
+
+    return true;
+}
diff --git a/block/parallels.c b/block/parallels.c
index 86958506d0..9536e4241e 100644
--- a/block/parallels.c
+++ b/block/parallels.c
@@ -1471,14 +1471,25 @@ fail:
 static int GRAPH_RDLOCK parallels_inactivate(BlockDriverState *bs)
 {
     BDRVParallelsState *s = bs->opaque;
+    Error *err = NULL;
     int ret;
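 
+    /* Store persistent bitmaps before the image is marked as cleanly closed */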
+    ret = parallels_store_persistent_dirty_bitmaps(bs, &err);
+    if (ret != 0) {
+        error_reportf_err(err, "Lost persistent bitmaps during "
+                          "inactivation of node '%s': ",
+                          bdrv_get_device_or_node_name(bs));
+    }
+
     s->header->inuse = 0;
     parallels_update_header(bs);
     /* errors are ignored, so we might as well pass exact=true */
-    ret = bdrv_truncate(bs->file, s->data_end << BDRV_SECTOR_BITS, true,
-                        PREALLOC_MODE_OFF, 0, NULL);
+    ret = bdrv_truncate(bs->file, s->data_end << BDRV_SECTOR_BITS,
+                        true, PREALLOC_MODE_OFF, 0, NULL);
+    if (ret < 0) {
+        error_report("Failed to truncate image: %s", strerror(-ret));
+    }
 
     return ret;
 }
@@ -1530,6 +1541,7 @@ static BlockDriver bdrv_parallels = {
     .bdrv_co_pdiscard = parallels_co_pdiscard,
     .bdrv_co_pwrite_zeroes = parallels_co_pwrite_zeroes,
     .bdrv_inactivate = parallels_inactivate,
+    .bdrv_co_can_store_new_dirty_bitmap = parallels_co_can_store_new_dirty_bitmap,
 };
 
 static void bdrv_parallels_init(void)
diff --git a/block/parallels.h b/block/parallels.h
index 493c89e976..d1e46dcfa8 100644
--- a/block/parallels.h
+++ b/block/parallels.h
@@ -101,5 +101,10 @@ int64_t GRAPH_RDLOCK parallels_allocate_host_clusters(BlockDriverState *bs,
 int GRAPH_RDLOCK
 parallels_read_format_extension(BlockDriverState *bs, int64_t ext_off,
                                 Error **errp);
+int GRAPH_RDLOCK
+parallels_store_persistent_dirty_bitmaps(BlockDriverState *bs, Error **errp);
+bool coroutine_fn
+parallels_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
+                                        uint32_t granularity, Error **errp);
 
 #endif
--
2.40.1