From: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
To: qemu-devel@nongnu.org
Cc: qemu-block@nongnu.org, den@virtuozzo.com, stefanha@redhat.com,
vsementsov@yandex-team.ru, kwolf@redhat.com, hreitz@redhat.com
Subject: [PATCH v4 11/21] parallels: Add dirty bitmaps saving
Date: Thu, 28 Dec 2023 11:12:22 +0100 [thread overview]
Message-ID: <20231228101232.372142-12-alexander.ivanov@virtuozzo.com> (raw)
In-Reply-To: <20231228101232.372142-1-alexander.ivanov@virtuozzo.com>
Dirty bitmaps can now be loaded, but there is no way to save them. Add code
for dirty bitmap storage.
Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
---
block/parallels-ext.c | 168 ++++++++++++++++++++++++++++++++++++++++++
block/parallels.c | 16 +++-
block/parallels.h | 5 ++
3 files changed, 187 insertions(+), 2 deletions(-)
diff --git a/block/parallels-ext.c b/block/parallels-ext.c
index b4e14c88f2..c83d1ea393 100644
--- a/block/parallels-ext.c
+++ b/block/parallels-ext.c
@@ -24,6 +24,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "block/block-io.h"
#include "block/block_int.h"
@@ -300,3 +301,170 @@ out:
return ret;
}
+
+static void GRAPH_RDLOCK parallels_save_bitmap(BlockDriverState *bs,
+ BdrvDirtyBitmap *bitmap,
+ uint8_t **buf, int *buf_size)
+{
+ BDRVParallelsState *s = bs->opaque;
+ ParallelsFeatureHeader *fh;
+ ParallelsDirtyBitmapFeature *bh;
+ uint64_t *l1_table, l1_size, granularity, limit;
+ int64_t bm_size, ser_size, offset, buf_used;
+ int64_t alloc_size = 1;
+ const char *name;
+ uint8_t *bm_buf;
+ QemuUUID uuid;
+ int ret = 0;
+
+ if (!bdrv_dirty_bitmap_get_persistence(bitmap) ||
+ bdrv_dirty_bitmap_inconsistent(bitmap)) {
+ return;
+ }
+
+ bm_size = bdrv_dirty_bitmap_size(bitmap);
+ granularity = bdrv_dirty_bitmap_granularity(bitmap);
+ limit = bdrv_dirty_bitmap_serialization_coverage(s->cluster_size, bitmap);
+ ser_size = bdrv_dirty_bitmap_serialization_size(bitmap, 0, bm_size);
+ l1_size = DIV_ROUND_UP(ser_size, s->cluster_size);
+
+ buf_used = l1_size * 8 + sizeof(*fh) + sizeof(*bh);
+ /* Check if there is enough space for the final section */
+ if (*buf_size - buf_used < sizeof(*fh)) {
+ return;
+ }
+
+ name = bdrv_dirty_bitmap_name(bitmap);
+ ret = qemu_uuid_parse(name, &uuid);
+ if (ret < 0) {
+ error_report("Can't save dirty bitmap: ID parsing error: '%s'", name);
+ return;
+ }
+
+ fh = (ParallelsFeatureHeader *)*buf;
+ bh = (ParallelsDirtyBitmapFeature *)(*buf + sizeof(*fh));
+ l1_table = (uint64_t *)((uint8_t *)bh + sizeof(*bh));
+
+ fh->magic = cpu_to_le64(PARALLELS_DIRTY_BITMAP_FEATURE_MAGIC);
+ fh->data_size = cpu_to_le32(l1_size * 8 + sizeof(*bh));
+
+ bh->l1_size = cpu_to_le32(l1_size);
+ bh->size = cpu_to_le64(bm_size >> BDRV_SECTOR_BITS);
+ bh->granularity = cpu_to_le32(granularity >> BDRV_SECTOR_BITS);
+ memcpy(bh->id, &uuid, sizeof(uuid));
+
+ bm_buf = qemu_blockalign(bs, s->cluster_size);
+
+ offset = 0;
+ while ((offset = bdrv_dirty_bitmap_next_dirty(bitmap, offset, bm_size)) >= 0) {
+ uint64_t idx = offset / limit;
+ int64_t cluster_off, end, write_size;
+
+ offset = QEMU_ALIGN_DOWN(offset, limit);
+ end = MIN(bm_size, offset + limit);
+ write_size = bdrv_dirty_bitmap_serialization_size(bitmap, offset,
+ end - offset);
+ assert(write_size <= s->cluster_size);
+
+ bdrv_dirty_bitmap_serialize_part(bitmap, bm_buf, offset, end - offset);
+ if (write_size < s->cluster_size) {
+ memset(bm_buf + write_size, 0, s->cluster_size - write_size);
+ }
+
+ cluster_off = parallels_allocate_host_clusters(bs, &alloc_size);
+ if (cluster_off <= 0) {
+ goto end;
+ }
+
+ ret = bdrv_pwrite(bs->file, cluster_off, s->cluster_size, bm_buf, 0);
+ if (ret < 0) {
+ memset(&fh->magic, 0, sizeof(fh->magic));
+ parallels_mark_unused(bs, s->used_bmap, s->used_bmap_size,
+ cluster_off, 1);
+ goto end;
+ }
+
+ l1_table[idx] = cpu_to_le64(cluster_off >> BDRV_SECTOR_BITS);
+ offset = end;
+ }
+
+ *buf_size -= buf_used;
+ *buf += buf_used;
+
+end:
+ qemu_vfree(bm_buf);
+}
+
+void GRAPH_RDLOCK
+parallels_store_persistent_dirty_bitmaps(BlockDriverState *bs, Error **errp)
+{
+ BDRVParallelsState *s = bs->opaque;
+ BdrvDirtyBitmap *bitmap;
+ ParallelsFormatExtensionHeader *eh;
+ int remaining = s->cluster_size;
+ uint8_t *buf, *pos;
+ int64_t header_off, alloc_size = 1;
+ g_autofree uint8_t *hash = NULL;
+ size_t hash_len = 0;
+ int ret;
+
+ s->header->ext_off = 0;
+
+ if (!bdrv_has_named_bitmaps(bs)) {
+ return;
+ }
+
+ buf = qemu_blockalign0(bs, s->cluster_size);
+
+ eh = (ParallelsFormatExtensionHeader *)buf;
+ pos = buf + sizeof(*eh);
+
+ eh->magic = cpu_to_le64(PARALLELS_FORMAT_EXTENSION_MAGIC);
+
+ FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
+ parallels_save_bitmap(bs, bitmap, &pos, &remaining);
+ }
+
+ header_off = parallels_allocate_host_clusters(bs, &alloc_size);
+ if (header_off < 0) {
+ error_report("Can't save dirty bitmap: cluster allocation error");
+ ret = header_off;
+ goto end;
+ }
+
+ ret = qcrypto_hash_bytes(QCRYPTO_HASH_ALG_MD5,
+ (const char *)(buf + sizeof(*eh)),
+ s->cluster_size - sizeof(*eh),
+ &hash, &hash_len, errp);
+ if (ret < 0 || hash_len != sizeof(eh->check_sum)) {
+ error_report("Can't save dirty bitmap: hash error");
+ ret = -EINVAL;
+ goto end;
+ }
+ memcpy(eh->check_sum, hash, hash_len);
+
+ ret = bdrv_pwrite(bs->file, header_off, s->cluster_size, buf, 0);
+ if (ret < 0) {
+ error_report("Can't save dirty bitmap: IO error");
+ parallels_mark_unused(bs, s->used_bmap, s->used_bmap_size,
+ header_off, 1);
+ goto end;
+ }
+
+ s->header->ext_off = cpu_to_le64(header_off / BDRV_SECTOR_SIZE);
+end:
+ qemu_vfree(buf);
+}
+
+bool coroutine_fn parallels_co_can_store_new_dirty_bitmap(BlockDriverState *bs,
+ const char *name,
+ uint32_t granularity,
+ Error **errp)
+{
+ if (bdrv_find_dirty_bitmap(bs, name)) {
+ error_setg(errp, "Bitmap already exists: %s", name);
+ return false;
+ }
+
+ return true;
+}
diff --git a/block/parallels.c b/block/parallels.c
index f38dd2309c..a49922c6a7 100644
--- a/block/parallels.c
+++ b/block/parallels.c
@@ -1466,14 +1466,25 @@ fail:
static int GRAPH_RDLOCK parallels_inactivate(BlockDriverState *bs)
{
BDRVParallelsState *s = bs->opaque;
+ Error *err = NULL;
int ret;
+ parallels_store_persistent_dirty_bitmaps(bs, &err);
+ if (err != NULL) {
+ error_reportf_err(err, "Lost persistent bitmaps during "
+ "inactivation of node '%s': ",
+ bdrv_get_device_or_node_name(bs));
+ }
+
s->header->inuse = 0;
parallels_update_header(bs);
/* errors are ignored, so we might as well pass exact=true */
- ret = bdrv_truncate(bs->file, s->data_end << BDRV_SECTOR_BITS, true,
- PREALLOC_MODE_OFF, 0, NULL);
+ ret = bdrv_truncate(bs->file, s->data_end << BDRV_SECTOR_BITS,
+ true, PREALLOC_MODE_OFF, 0, NULL);
+ if (ret < 0) {
+ error_report("Failed to truncate image: %s", strerror(-ret));
+ }
return ret;
}
@@ -1525,6 +1536,7 @@ static BlockDriver bdrv_parallels = {
.bdrv_co_pdiscard = parallels_co_pdiscard,
.bdrv_co_pwrite_zeroes = parallels_co_pwrite_zeroes,
.bdrv_inactivate = parallels_inactivate,
+ .bdrv_co_can_store_new_dirty_bitmap = parallels_co_can_store_new_dirty_bitmap,
};
static void bdrv_parallels_init(void)
diff --git a/block/parallels.h b/block/parallels.h
index 493c89e976..9db4f5c908 100644
--- a/block/parallels.h
+++ b/block/parallels.h
@@ -101,5 +101,10 @@ int64_t GRAPH_RDLOCK parallels_allocate_host_clusters(BlockDriverState *bs,
int GRAPH_RDLOCK
parallels_read_format_extension(BlockDriverState *bs, int64_t ext_off,
Error **errp);
+void GRAPH_RDLOCK
+parallels_store_persistent_dirty_bitmaps(BlockDriverState *bs, Error **errp);
+bool coroutine_fn
+parallels_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
+ uint32_t granularity, Error **errp);
#endif
--
2.40.1
next prev parent reply other threads:[~2023-12-28 10:14 UTC|newest]
Thread overview: 45+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-12-28 10:12 [PATCH v4 00/21] parallels: Add full dirty bitmap support Alexander Ivanov
2023-12-28 10:12 ` [PATCH v4 01/21] parallels: Set s->used_bmap to NULL in parallels_free_used_bitmap() Alexander Ivanov
2023-12-28 10:12 ` [PATCH v4 02/21] parallels: Move inactivation code to a separate function Alexander Ivanov
2023-12-28 10:12 ` [PATCH v4 03/21] parallels: Make mark_used() a global function Alexander Ivanov
2023-12-28 10:12 ` [PATCH v4 04/21] parallels: Limit search in parallels_mark_used to the last marked claster Alexander Ivanov
2024-01-16 13:52 ` Denis V. Lunev
2023-12-28 10:12 ` [PATCH v4 05/21] parallels: Add parallels_mark_unused() helper Alexander Ivanov
2024-01-16 13:54 ` Denis V. Lunev
2023-12-28 10:12 ` [PATCH v4 06/21] parallels: Move host clusters allocation to a separate function Alexander Ivanov
2024-01-16 14:19 ` Denis V. Lunev
2023-12-28 10:12 ` [PATCH v4 07/21] parallels: Set data_end value in parallels_check_leak() Alexander Ivanov
2024-01-16 14:21 ` Denis V. Lunev
2023-12-28 10:12 ` [PATCH v4 08/21] parallels: Recreate used bitmap " Alexander Ivanov
2024-01-16 14:24 ` Denis V. Lunev
2023-12-28 10:12 ` [PATCH v4 09/21] parallels: Add a note about used bitmap in parallels_check_duplicate() Alexander Ivanov
2024-01-16 14:30 ` Denis V. Lunev
2023-12-28 10:12 ` [PATCH v4 10/21] parallels: Create used bitmap even if checks needed Alexander Ivanov
2024-01-16 14:37 ` Denis V. Lunev
2023-12-28 10:12 ` Alexander Ivanov [this message]
2024-01-18 13:27 ` [PATCH v4 11/21] parallels: Add dirty bitmaps saving Denis V. Lunev
2024-02-07 12:42 ` Alexander Ivanov
2023-12-28 10:12 ` [PATCH v4 12/21] parallels: Let image extensions work in RW mode Alexander Ivanov
2024-01-16 14:45 ` Denis V. Lunev
2024-01-18 13:31 ` Denis V. Lunev
2024-02-28 10:25 ` Alexander Ivanov
2024-02-28 12:11 ` Denis V. Lunev
2024-01-18 13:35 ` Denis V. Lunev
2023-12-28 10:12 ` [PATCH v4 13/21] parallels: Handle L1 entries equal to one Alexander Ivanov
2024-01-18 13:37 ` Denis V. Lunev
2024-02-29 11:57 ` Alexander Ivanov
2023-12-28 10:12 ` [PATCH v4 14/21] parallels: Make a loaded dirty bitmap persistent Alexander Ivanov
2024-01-18 13:59 ` Denis V. Lunev
2023-12-28 10:12 ` [PATCH v4 15/21] parallels: Reverse a conditional in parallels_check_leak() to reduce indents Alexander Ivanov
2024-01-18 14:49 ` Denis V. Lunev
2023-12-28 10:12 ` [PATCH v4 16/21] parallels: Truncate images on the last used cluster Alexander Ivanov
2024-01-18 14:52 ` Denis V. Lunev
2023-12-28 10:12 ` [PATCH v4 17/21] parallels: Check unused clusters in parallels_check_leak() Alexander Ivanov
2024-01-18 14:55 ` Denis V. Lunev
2023-12-28 10:12 ` [PATCH v4 18/21] parallels: Remove unnecessary data_end field Alexander Ivanov
2024-01-18 15:00 ` Denis V. Lunev
2023-12-28 10:12 ` [PATCH v4 19/21] tests: Add parallels images support to test 165 Alexander Ivanov
2023-12-28 10:12 ` [PATCH v4 20/21] tests: Turned on 256, 299, 304 and block-status-cache for parallels format Alexander Ivanov
2023-12-28 10:12 ` [PATCH v4 21/21] tests: Add parallels format support to image-fleecing Alexander Ivanov
2023-12-29 15:59 ` Vladimir Sementsov-Ogievskiy
2024-01-18 15:01 ` [PATCH v4 00/21] parallels: Add full dirty bitmap support Denis V. Lunev
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231228101232.372142-12-alexander.ivanov@virtuozzo.com \
--to=alexander.ivanov@virtuozzo.com \
--cc=den@virtuozzo.com \
--cc=hreitz@redhat.com \
--cc=kwolf@redhat.com \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=stefanha@redhat.com \
--cc=vsementsov@yandex-team.ru \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).