From: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
To: qemu-devel@nongnu.org
Cc: qemu-block@nongnu.org, den@virtuozzo.com, stefanha@redhat.com,
vsementsov@yandex-team.ru, kwolf@redhat.com, hreitz@redhat.com
Subject: [PATCH v3 10/21] parallels: Add dirty bitmaps saving
Date: Fri, 27 Oct 2023 09:46:25 +0200 [thread overview]
Message-ID: <20231027074636.430139-11-alexander.ivanov@virtuozzo.com> (raw)
In-Reply-To: <20231027074636.430139-1-alexander.ivanov@virtuozzo.com>
Dirty bitmaps can now be loaded, but saving them is not yet supported. Add code
for dirty bitmap storage.
Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
---
block/parallels-ext.c | 167 ++++++++++++++++++++++++++++++++++++++++++
block/parallels.c | 16 +++-
block/parallels.h | 5 ++
3 files changed, 186 insertions(+), 2 deletions(-)
diff --git a/block/parallels-ext.c b/block/parallels-ext.c
index 8a109f005a..0a632a2331 100644
--- a/block/parallels-ext.c
+++ b/block/parallels-ext.c
@@ -24,6 +24,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "block/block-io.h"
#include "block/block_int.h"
@@ -301,3 +302,169 @@ out:
return ret;
}
+
+static void parallels_save_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
+ uint8_t **buf, int *buf_size)
+{
+ BDRVParallelsState *s = bs->opaque;
+ ParallelsFeatureHeader *fh;
+ ParallelsDirtyBitmapFeature *bh;
+ uint64_t *l1_table, l1_size, granularity, limit;
+ int64_t bm_size, ser_size, offset, buf_used;
+ int64_t alloc_size = 1;
+ const char *name;
+ uint8_t *bm_buf;
+ QemuUUID uuid;
+ int ret = 0;
+
+ if (!bdrv_dirty_bitmap_get_persistence(bitmap) ||
+ bdrv_dirty_bitmap_inconsistent(bitmap)) {
+ return;
+ }
+
+ bm_size = bdrv_dirty_bitmap_size(bitmap);
+ granularity = bdrv_dirty_bitmap_granularity(bitmap);
+ limit = bdrv_dirty_bitmap_serialization_coverage(s->cluster_size, bitmap);
+ ser_size = bdrv_dirty_bitmap_serialization_size(bitmap, 0, bm_size);
+ l1_size = DIV_ROUND_UP(ser_size, s->cluster_size);
+
+ buf_used = l1_size * 8 + sizeof(*fh) + sizeof(*bh);
+ /* Check if there is enough space for the final section */
+ if (*buf_size - buf_used < sizeof(*fh)) {
+ return;
+ }
+
+ name = bdrv_dirty_bitmap_name(bitmap);
+ ret = qemu_uuid_parse(name, &uuid);
+ if (ret < 0) {
+ error_report("Can't save dirty bitmap: ID parsing error: '%s'", name);
+ return;
+ }
+
+ fh = (ParallelsFeatureHeader *)*buf;
+ bh = (ParallelsDirtyBitmapFeature *)(*buf + sizeof(*fh));
+ l1_table = (uint64_t *)((uint8_t *)bh + sizeof(*bh));
+
+ fh->magic = cpu_to_le64(PARALLELS_DIRTY_BITMAP_FEATURE_MAGIC);
+ fh->data_size = cpu_to_le32(l1_size * 8 + sizeof(*bh));
+
+ bh->l1_size = cpu_to_le32(l1_size);
+ bh->size = cpu_to_le64(bm_size >> BDRV_SECTOR_BITS);
+ bh->granularity = cpu_to_le32(granularity >> BDRV_SECTOR_BITS);
+ memcpy(bh->id, &uuid, sizeof(uuid));
+
+ bm_buf = qemu_blockalign(bs, s->cluster_size);
+
+ offset = 0;
+ while ((offset = bdrv_dirty_bitmap_next_dirty(bitmap, offset, bm_size)) >= 0) {
+ uint64_t idx = offset / limit;
+ int64_t cluster_off, end, write_size;
+
+ offset = QEMU_ALIGN_DOWN(offset, limit);
+ end = MIN(bm_size, offset + limit);
+ write_size = bdrv_dirty_bitmap_serialization_size(bitmap, offset,
+ end - offset);
+ assert(write_size <= s->cluster_size);
+
+ bdrv_dirty_bitmap_serialize_part(bitmap, bm_buf, offset, end - offset);
+ if (write_size < s->cluster_size) {
+ memset(bm_buf + write_size, 0, s->cluster_size - write_size);
+ }
+
+ cluster_off = parallels_allocate_host_clusters(bs, &alloc_size);
+ if (cluster_off <= 0) {
+ goto end;
+ }
+
+ ret = bdrv_pwrite(bs->file, cluster_off, s->cluster_size, bm_buf, 0);
+ if (ret < 0) {
+ memset(&fh->magic, 0, sizeof(fh->magic));
+ parallels_mark_unused(bs, s->used_bmap, s->used_bmap_size,
+ cluster_off, 1);
+ goto end;
+ }
+
+ l1_table[idx] = cpu_to_le64(cluster_off >> BDRV_SECTOR_BITS);
+ offset = end;
+ }
+
+ *buf_size -= buf_used;
+ *buf += buf_used;
+
+end:
+ qemu_vfree(bm_buf);
+}
+
+void parallels_store_persistent_dirty_bitmaps(BlockDriverState *bs,
+ Error **errp)
+{
+ BDRVParallelsState *s = bs->opaque;
+ BdrvDirtyBitmap *bitmap;
+ ParallelsFormatExtensionHeader *eh;
+ int remaining = s->cluster_size;
+ uint8_t *buf, *pos;
+ int64_t header_off, alloc_size = 1;
+ g_autofree uint8_t *hash = NULL;
+ size_t hash_len = 0;
+ int ret;
+
+ s->header->ext_off = 0;
+
+ if (!bdrv_has_named_bitmaps(bs)) {
+ return;
+ }
+
+ buf = qemu_blockalign0(bs, s->cluster_size);
+
+ eh = (ParallelsFormatExtensionHeader *)buf;
+ pos = buf + sizeof(*eh);
+
+ eh->magic = cpu_to_le64(PARALLELS_FORMAT_EXTENSION_MAGIC);
+
+ FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
+ parallels_save_bitmap(bs, bitmap, &pos, &remaining);
+ }
+
+ header_off = parallels_allocate_host_clusters(bs, &alloc_size);
+ if (header_off < 0) {
+ error_report("Can't save dirty bitmap: cluster allocation error");
+ ret = header_off;
+ goto end;
+ }
+
+ ret = qcrypto_hash_bytes(QCRYPTO_HASH_ALG_MD5,
+ (const char *)(buf + sizeof(*eh)),
+ s->cluster_size - sizeof(*eh),
+ &hash, &hash_len, errp);
+ if (ret < 0 || hash_len != sizeof(eh->check_sum)) {
+ error_report("Can't save dirty bitmap: hash error");
+ ret = -EINVAL;
+ goto end;
+ }
+ memcpy(eh->check_sum, hash, hash_len);
+
+ ret = bdrv_pwrite(bs->file, header_off, s->cluster_size, buf, 0);
+ if (ret < 0) {
+ error_report("Can't save dirty bitmap: IO error");
+ parallels_mark_unused(bs, s->used_bmap, s->used_bmap_size,
+ header_off, 1);
+ goto end;
+ }
+
+ s->header->ext_off = cpu_to_le64(header_off / BDRV_SECTOR_SIZE);
+end:
+ qemu_vfree(buf);
+}
+
+bool coroutine_fn parallels_co_can_store_new_dirty_bitmap(BlockDriverState *bs,
+ const char *name,
+ uint32_t granularity,
+ Error **errp)
+{
+ if (bdrv_find_dirty_bitmap(bs, name)) {
+ error_setg(errp, "Bitmap already exists: %s", name);
+ return false;
+ }
+
+ return true;
+}
diff --git a/block/parallels.c b/block/parallels.c
index 925aa9e569..2d82e8ff6a 100644
--- a/block/parallels.c
+++ b/block/parallels.c
@@ -1468,14 +1468,25 @@ fail:
static int parallels_inactivate(BlockDriverState *bs)
{
BDRVParallelsState *s = bs->opaque;
+ Error *err = NULL;
int ret;
+ parallels_store_persistent_dirty_bitmaps(bs, &err);
+ if (err != NULL) {
+ error_reportf_err(err, "Lost persistent bitmaps during "
+ "inactivation of node '%s': ",
+ bdrv_get_device_or_node_name(bs));
+ }
+
s->header->inuse = 0;
parallels_update_header(bs);
/* errors are ignored, so we might as well pass exact=true */
- ret = bdrv_truncate(bs->file, s->data_end << BDRV_SECTOR_BITS, true,
- PREALLOC_MODE_OFF, 0, NULL);
+ ret = bdrv_truncate(bs->file, s->data_end << BDRV_SECTOR_BITS,
+ true, PREALLOC_MODE_OFF, 0, NULL);
+ if (ret < 0) {
+ error_report("Failed to truncate image: %s", strerror(-ret));
+ }
return ret;
}
@@ -1525,6 +1536,7 @@ static BlockDriver bdrv_parallels = {
.bdrv_co_pdiscard = parallels_co_pdiscard,
.bdrv_co_pwrite_zeroes = parallels_co_pwrite_zeroes,
.bdrv_inactivate = parallels_inactivate,
+ .bdrv_co_can_store_new_dirty_bitmap = parallels_co_can_store_new_dirty_bitmap,
};
static void bdrv_parallels_init(void)
diff --git a/block/parallels.h b/block/parallels.h
index 4e7aa6b80f..18b4f8068e 100644
--- a/block/parallels.h
+++ b/block/parallels.h
@@ -100,6 +100,11 @@ int64_t parallels_allocate_host_clusters(BlockDriverState *bs,
int parallels_read_format_extension(BlockDriverState *bs,
int64_t ext_off, Error **errp);
+void parallels_store_persistent_dirty_bitmaps(BlockDriverState *bs,
+ Error **errp);
+bool coroutine_fn
+parallels_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
+ uint32_t granularity, Error **errp);
#endif
--
2.34.1
next prev parent reply other threads:[~2023-10-27 7:48 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-10-27 7:46 [PATCH v3 00/21] parallels: Add full dirty bitmap support Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 01/21] parallels: Set s->used_bmap to NULL in parallels_free_used_bitmap() Alexander Ivanov
2023-10-30 8:44 ` Denis V. Lunev
2023-10-27 7:46 ` [PATCH v3 02/21] parallels: Move inactivation code to a separate function Alexander Ivanov
2023-10-30 8:45 ` Denis V. Lunev
2023-10-27 7:46 ` [PATCH v3 03/21] parallels: Make mark_used() a global function Alexander Ivanov
2023-10-30 8:47 ` Denis V. Lunev
2023-10-27 7:46 ` [PATCH v3 04/21] parallels: Add parallels_mark_unused() helper Alexander Ivanov
2023-10-30 9:06 ` Denis V. Lunev
2023-10-30 9:09 ` Denis V. Lunev
2023-11-13 9:53 ` Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 05/21] parallels: Move host clusters allocation to a separate function Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 06/21] parallels: Set data_end value in parallels_check_leak() Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 07/21] parallels: Recreate used bitmap " Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 08/21] parallels: Add a note about used bitmap in parallels_check_duplicate() Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 09/21] parallels: Create used bitmap even if checks needed Alexander Ivanov
2023-10-27 7:46 ` Alexander Ivanov [this message]
2023-10-27 7:46 ` [PATCH v3 11/21] parallels: Mark parallels_inactivate GRAPH_RDLOCK, guard parallels_close Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 12/21] parallels: Let image extensions work in RW mode Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 13/21] parallels: Handle L1 entries equal to one Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 14/21] parallels: Make a loaded dirty bitmap persistent Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 15/21] parallels: Reverse a conditional in parallels_check_leak() to reduce indents Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 16/21] parallels: Truncate images on the last used cluster Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 17/21] parallels: Check unused clusters in parallels_check_leak() Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 18/21] parallels: Remove unnecessary data_end field Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 19/21] tests: Add parallels images support to test 165 Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 20/21] tests: Turned on 256, 299, 304 and block-status-cache for parallels format Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 21/21] tests: Add parallels format support to image-fleecing Alexander Ivanov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231027074636.430139-11-alexander.ivanov@virtuozzo.com \
--to=alexander.ivanov@virtuozzo.com \
--cc=den@virtuozzo.com \
--cc=hreitz@redhat.com \
--cc=kwolf@redhat.com \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=stefanha@redhat.com \
--cc=vsementsov@yandex-team.ru \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).