From: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
To: qemu-devel@nongnu.org
Cc: qemu-block@nongnu.org, den@virtuozzo.com, stefanha@redhat.com,
vsementsov@yandex-team.ru, kwolf@redhat.com, hreitz@redhat.com
Subject: [PATCH v3 05/21] parallels: Move host clusters allocation to a separate function
Date: Fri, 27 Oct 2023 09:46:20 +0200 [thread overview]
Message-ID: <20231027074636.430139-6-alexander.ivanov@virtuozzo.com> (raw)
In-Reply-To: <20231027074636.430139-1-alexander.ivanov@virtuozzo.com>
For parallels image extensions we need to allocate host clusters
without any connection to BAT. Move the host cluster allocation code to
a separate function, parallels_allocate_host_clusters().
Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
---
block/parallels.c | 128 ++++++++++++++++++++++++----------------------
block/parallels.h | 4 ++
2 files changed, 72 insertions(+), 60 deletions(-)
diff --git a/block/parallels.c b/block/parallels.c
index a30bb5fe0d..33bb8f1084 100644
--- a/block/parallels.c
+++ b/block/parallels.c
@@ -266,58 +266,31 @@ static void parallels_free_used_bitmap(BlockDriverState *bs)
s->used_bmap = NULL;
}
-static int64_t coroutine_fn GRAPH_RDLOCK
-allocate_clusters(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, int *pnum)
+int64_t parallels_allocate_host_clusters(BlockDriverState *bs,
+ int64_t *clusters)
{
- int ret = 0;
BDRVParallelsState *s = bs->opaque;
- int64_t i, pos, idx, to_allocate, first_free, host_off;
-
- pos = block_status(s, sector_num, nb_sectors, pnum);
- if (pos > 0) {
- return pos;
- }
-
- idx = sector_num / s->tracks;
- to_allocate = DIV_ROUND_UP(sector_num + *pnum, s->tracks) - idx;
-
- /*
- * This function is called only by parallels_co_writev(), which will never
- * pass a sector_num at or beyond the end of the image (because the block
- * layer never passes such a sector_num to that function). Therefore, idx
- * is always below s->bat_size.
- * block_status() will limit *pnum so that sector_num + *pnum will not
- * exceed the image end. Therefore, idx + to_allocate cannot exceed
- * s->bat_size.
- * Note that s->bat_size is an unsigned int, therefore idx + to_allocate
- * will always fit into a uint32_t.
- */
- assert(idx < s->bat_size && idx + to_allocate <= s->bat_size);
+ int64_t first_free, next_used, host_off, prealloc_clusters;
+ int64_t bytes, prealloc_bytes;
+ uint32_t new_usedsize;
+ int ret = 0;
first_free = find_first_zero_bit(s->used_bmap, s->used_bmap_size);
if (first_free == s->used_bmap_size) {
- uint32_t new_usedsize;
- int64_t bytes = to_allocate * s->cluster_size;
- bytes += s->prealloc_size * BDRV_SECTOR_SIZE;
-
host_off = s->data_end * BDRV_SECTOR_SIZE;
+ prealloc_clusters = *clusters + s->prealloc_size / s->tracks;
+ bytes = *clusters * s->cluster_size;
+ prealloc_bytes = prealloc_clusters * s->cluster_size;
- /*
- * We require the expanded size to read back as zero. If the
- * user permitted truncation, we try that; but if it fails, we
- * force the safer-but-slower fallocate.
- */
if (s->prealloc_mode == PRL_PREALLOC_MODE_TRUNCATE) {
- ret = bdrv_co_truncate(bs->file, host_off + bytes,
- false, PREALLOC_MODE_OFF,
- BDRV_REQ_ZERO_WRITE, NULL);
+ ret = bdrv_truncate(bs->file, host_off + prealloc_bytes, false,
+ PREALLOC_MODE_OFF, BDRV_REQ_ZERO_WRITE, NULL);
if (ret == -ENOTSUP) {
s->prealloc_mode = PRL_PREALLOC_MODE_FALLOCATE;
}
}
if (s->prealloc_mode == PRL_PREALLOC_MODE_FALLOCATE) {
- ret = bdrv_co_pwrite_zeroes(bs->file, host_off, bytes, 0);
+ ret = bdrv_pwrite_zeroes(bs->file, host_off, prealloc_bytes, 0);
}
if (ret < 0) {
return ret;
@@ -327,15 +300,15 @@ allocate_clusters(BlockDriverState *bs, int64_t sector_num,
s->used_bmap = bitmap_zero_extend(s->used_bmap, s->used_bmap_size,
new_usedsize);
s->used_bmap_size = new_usedsize;
+ if (host_off + bytes > s->data_end * BDRV_SECTOR_SIZE) {
+ s->data_end = (host_off + bytes) / BDRV_SECTOR_SIZE;
+ }
} else {
- int64_t next_used;
next_used = find_next_bit(s->used_bmap, s->used_bmap_size, first_free);
/* Not enough continuous clusters in the middle, adjust the size */
- if (next_used - first_free < to_allocate) {
- to_allocate = next_used - first_free;
- *pnum = (idx + to_allocate) * s->tracks - sector_num;
- }
+ *clusters = MIN(*clusters, next_used - first_free);
+ bytes = *clusters * s->cluster_size;
host_off = s->data_start * BDRV_SECTOR_SIZE;
host_off += first_free * s->cluster_size;
@@ -347,14 +320,59 @@ allocate_clusters(BlockDriverState *bs, int64_t sector_num,
*/
if (s->prealloc_mode == PRL_PREALLOC_MODE_FALLOCATE &&
host_off < s->data_end * BDRV_SECTOR_SIZE) {
- ret = bdrv_co_pwrite_zeroes(bs->file, host_off,
- s->cluster_size * to_allocate, 0);
+ ret = bdrv_pwrite_zeroes(bs->file, host_off, bytes, 0);
if (ret < 0) {
return ret;
}
}
}
+ ret = parallels_mark_used(bs, s->used_bmap, s->used_bmap_size,
+ host_off, *clusters);
+ if (ret < 0) {
+ /* Image consistency is broken. Alarm! */
+ return ret;
+ }
+
+ return host_off;
+}
+
+static int64_t coroutine_fn GRAPH_RDLOCK
+allocate_clusters(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors, int *pnum)
+{
+ int ret = 0;
+ BDRVParallelsState *s = bs->opaque;
+ int64_t i, pos, idx, to_allocate, host_off;
+
+ pos = block_status(s, sector_num, nb_sectors, pnum);
+ if (pos > 0) {
+ return pos;
+ }
+
+ idx = sector_num / s->tracks;
+ to_allocate = DIV_ROUND_UP(sector_num + *pnum, s->tracks) - idx;
+
+ /*
+ * This function is called only by parallels_co_writev(), which will never
+ * pass a sector_num at or beyond the end of the image (because the block
+ * layer never passes such a sector_num to that function). Therefore, idx
+ * is always below s->bat_size.
+ * block_status() will limit *pnum so that sector_num + *pnum will not
+ * exceed the image end. Therefore, idx + to_allocate cannot exceed
+ * s->bat_size.
+ * Note that s->bat_size is an unsigned int, therefore idx + to_allocate
+ * will always fit into a uint32_t.
+ */
+ assert(idx < s->bat_size && idx + to_allocate <= s->bat_size);
+
+ host_off = parallels_allocate_host_clusters(bs, &to_allocate);
+ if (host_off < 0) {
+ return host_off;
+ }
+
+ *pnum = MIN(*pnum, (idx + to_allocate) * s->tracks - sector_num);
+
/*
* Try to read from backing to fill empty clusters
* FIXME: 1. previous write_zeroes may be redundant
@@ -371,33 +389,23 @@ allocate_clusters(BlockDriverState *bs, int64_t sector_num,
ret = bdrv_co_pread(bs->backing, idx * s->tracks * BDRV_SECTOR_SIZE,
nb_cow_bytes, buf, 0);
- if (ret < 0) {
- qemu_vfree(buf);
- return ret;
+ if (ret == 0) {
+ ret = bdrv_co_pwrite(bs->file, host_off, nb_cow_bytes, buf, 0);
}
- ret = bdrv_co_pwrite(bs->file, s->data_end * BDRV_SECTOR_SIZE,
- nb_cow_bytes, buf, 0);
qemu_vfree(buf);
if (ret < 0) {
+ parallels_mark_unused(bs, s->used_bmap, s->used_bmap_size,
+ host_off, to_allocate);
return ret;
}
}
- ret = parallels_mark_used(bs, s->used_bmap, s->used_bmap_size,
- host_off, to_allocate);
- if (ret < 0) {
- /* Image consistency is broken. Alarm! */
- return ret;
- }
for (i = 0; i < to_allocate; i++) {
parallels_set_bat_entry(s, idx + i,
host_off / BDRV_SECTOR_SIZE / s->off_multiplier);
host_off += s->cluster_size;
}
- if (host_off > s->data_end * BDRV_SECTOR_SIZE) {
- s->data_end = host_off / BDRV_SECTOR_SIZE;
- }
return bat2sect(s, idx) + sector_num % s->tracks;
}
diff --git a/block/parallels.h b/block/parallels.h
index 31ebbd6846..4e7aa6b80f 100644
--- a/block/parallels.h
+++ b/block/parallels.h
@@ -95,7 +95,11 @@ int parallels_mark_used(BlockDriverState *bs, unsigned long *bitmap,
int parallels_mark_unused(BlockDriverState *bs, unsigned long *bitmap,
uint32_t bitmap_size, int64_t off, uint32_t count);
+int64_t parallels_allocate_host_clusters(BlockDriverState *bs,
+ int64_t *clusters);
+
int parallels_read_format_extension(BlockDriverState *bs,
int64_t ext_off, Error **errp);
+
#endif
--
2.34.1
next prev parent reply other threads:[~2023-10-27 7:51 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-10-27 7:46 [PATCH v3 00/21] parallels: Add full dirty bitmap support Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 01/21] parallels: Set s->used_bmap to NULL in parallels_free_used_bitmap() Alexander Ivanov
2023-10-30 8:44 ` Denis V. Lunev
2023-10-27 7:46 ` [PATCH v3 02/21] parallels: Move inactivation code to a separate function Alexander Ivanov
2023-10-30 8:45 ` Denis V. Lunev
2023-10-27 7:46 ` [PATCH v3 03/21] parallels: Make mark_used() a global function Alexander Ivanov
2023-10-30 8:47 ` Denis V. Lunev
2023-10-27 7:46 ` [PATCH v3 04/21] parallels: Add parallels_mark_unused() helper Alexander Ivanov
2023-10-30 9:06 ` Denis V. Lunev
2023-10-30 9:09 ` Denis V. Lunev
2023-11-13 9:53 ` Alexander Ivanov
2023-10-27 7:46 ` Alexander Ivanov [this message]
2023-10-27 7:46 ` [PATCH v3 06/21] parallels: Set data_end value in parallels_check_leak() Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 07/21] parallels: Recreate used bitmap " Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 08/21] parallels: Add a note about used bitmap in parallels_check_duplicate() Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 09/21] parallels: Create used bitmap even if checks needed Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 10/21] parallels: Add dirty bitmaps saving Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 11/21] parallels: Mark parallels_inactivate GRAPH_RDLOCK, guard parallels_close Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 12/21] parallels: Let image extensions work in RW mode Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 13/21] parallels: Handle L1 entries equal to one Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 14/21] parallels: Make a loaded dirty bitmap persistent Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 15/21] parallels: Reverse a conditional in parallels_check_leak() to reduce indents Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 16/21] parallels: Truncate images on the last used cluster Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 17/21] parallels: Check unused clusters in parallels_check_leak() Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 18/21] parallels: Remove unnecessary data_end field Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 19/21] tests: Add parallels images support to test 165 Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 20/21] tests: Turned on 256, 299, 304 and block-status-cache for parallels format Alexander Ivanov
2023-10-27 7:46 ` [PATCH v3 21/21] tests: Add parallels format support to image-fleecing Alexander Ivanov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231027074636.430139-6-alexander.ivanov@virtuozzo.com \
--to=alexander.ivanov@virtuozzo.com \
--cc=den@virtuozzo.com \
--cc=hreitz@redhat.com \
--cc=kwolf@redhat.com \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=stefanha@redhat.com \
--cc=vsementsov@yandex-team.ru \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).