From: Yu Kuai <yukuai@fnnas.com>
To: linux-raid@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, Li Nan <linan122@huawei.com>,
Yu Kuai <yukuai@fnnas.com>, Cheng Cheng <chencheng@fnnas.com>
Subject: [PATCH] md/md-llbitmap: remap checkpointed bits as reshape progresses
Date: Sun, 19 Apr 2026 11:09:34 +0800 [thread overview]
Message-ID: <20260419030942.824195-12-yukuai@fnnas.com> (raw)
In-Reply-To: <20260419030942.824195-1-yukuai@fnnas.com>
As reshape_position advances, merge the checkpointed old-layout llbitmap
state forward into the new layout, recording the remap through the new
reshape_mark() hook.
Signed-off-by: Yu Kuai <yukuai@fnnas.com>
---
drivers/md/md-llbitmap.c | 172 +++++++++++++++++++++++++++++++++++++++
1 file changed, 172 insertions(+)
diff --git a/drivers/md/md-llbitmap.c b/drivers/md/md-llbitmap.c
index ad1b7a85914b..b3ff67779557 100644
--- a/drivers/md/md-llbitmap.c
+++ b/drivers/md/md-llbitmap.c
@@ -435,6 +435,14 @@ static void llbitmap_map_layout(struct llbitmap *llbitmap, sector_t *offset,
else if (!previous && llbitmap->mddev->pers->bitmap_sector)
llbitmap->mddev->pers->bitmap_sector(llbitmap->mddev, offset,
sectors);
+
+ limit = llbitmap_personality_sync_size(llbitmap, previous);
+ start = *offset;
+ end = start + *sectors;
+ if (start >= limit)
+ *sectors = 0;
+ else if (end > limit)
+ *sectors = limit - start;
}
static void llbitmap_encode_range(struct llbitmap *llbitmap, sector_t *offset,
@@ -787,6 +795,33 @@ static int llbitmap_prepare_resize(struct llbitmap *llbitmap,
return 0;
}
+/*
+ * Combine a destination bit state with a source bit state while folding
+ * checkpointed old-layout bitmap state into the new layout.
+ *
+ * Priority order: NeedSync/Syncing dominate (a BitSyncing input collapses
+ * to BitNeedSync so the range is re-synced after the remap), then Dirty,
+ * then Clean, then Unwritten.
+ */
+static enum llbitmap_state
+llbitmap_rmerge_state(struct llbitmap *llbitmap,
+ enum llbitmap_state dst,
+ enum llbitmap_state src)
+{
+ bool level_456 = raid_is_456(llbitmap->mddev);
+
+ if (dst == BitNeedSync || dst == BitSyncing ||
+ src == BitNeedSync || src == BitSyncing)
+ return BitNeedSync;
+
+ if (dst == BitDirty || src == BitDirty)
+ return BitDirty;
+
+ /*
+ * Reshape generates valid target parity/data for both already-written
+ * and not-yet-written regions in the checkpointed range, so a mix of
+ * clean and unwritten still results in a clean destination bit.
+ */
+ if (level_456 && ((dst == BitClean && src == BitUnwritten) ||
+ (src == BitClean && dst == BitUnwritten)))
+ return BitClean;
+ /*
+ * NOTE(review): the level_456 branch above is redundant as written --
+ * the fallback below also maps any Clean/Unwritten mix to BitClean,
+ * for every personality.  Confirm whether non-RAID456 levels were
+ * intended to get different semantics (e.g. BitNeedSync) for a
+ * Clean/Unwritten mix; otherwise the level_456 check can be dropped.
+ */
+ if (dst == BitClean || src == BitClean)
+ return BitClean;
+ return BitUnwritten;
+}
+
static void llbitmap_init_state(struct llbitmap *llbitmap)
{
enum llbitmap_state state = BitUnwritten;
@@ -1656,6 +1691,120 @@ static int llbitmap_reshape_can_start(struct mddev *mddev)
return ret;
}
+/*
+ * A sector span in one layout, kept both as an (offset, sectors) pair --
+ * the form llbitmap_map_layout() operates on -- and as derived inclusive/
+ * exclusive [start, end) bounds used for the interval arithmetic below.
+ */
+struct llbitmap_reshape_range {
+ sector_t offset; /* first sector of the range */
+ unsigned long sectors; /* length of the range in sectors */
+ sector_t start; /* equals offset once the range is set up */
+ sector_t end; /* exclusive end: offset + sectors */
+};
+
+/*
+ * Seed the state of destination chunk @dst before source bits are merged
+ * in.  A chunk whose span extends beyond the range being merged (before
+ * new->offset for a forward reshape, past new->end for a backward one)
+ * keeps its previously recorded state; otherwise it starts as
+ * BitUnwritten.
+ */
+static enum llbitmap_state
+llbitmap_reshape_init_dst(struct llbitmap *llbitmap, unsigned long dst,
+ const struct llbitmap_reshape_range *new)
+{
+ u64 chunk_lo = (u64)dst * llbitmap->reshape_chunksize;
+ u64 chunk_hi = chunk_lo + llbitmap->reshape_chunksize;
+ bool keep_existing;
+
+ if (llbitmap->mddev->reshape_backwards)
+ keep_existing = chunk_hi > new->end;
+ else
+ keep_existing = chunk_lo < new->offset;
+
+ if (keep_existing)
+ return llbitmap_read(llbitmap, dst);
+
+ return BitUnwritten;
+}
+
+/*
+ * Compute the part of destination chunk @dst (in units of
+ * reshape_chunksize) that falls inside the span covered by @new, and
+ * store it in @dst_range in both [start, end) and (offset, sectors) form.
+ */
+static void llbitmap_reshape_dst_range(struct llbitmap *llbitmap,
+ unsigned long dst,
+ const struct llbitmap_reshape_range *new,
+ struct llbitmap_reshape_range *dst_range)
+{
+ sector_t chunk_lo = (sector_t)dst * llbitmap->reshape_chunksize;
+ sector_t chunk_hi = chunk_lo + llbitmap->reshape_chunksize;
+
+ /* Clamp the chunk's span to the merged range. */
+ if (chunk_lo < new->offset)
+ chunk_lo = new->offset;
+ if (chunk_hi > new->end)
+ chunk_hi = new->end;
+
+ dst_range->start = chunk_lo;
+ dst_range->end = chunk_hi;
+ dst_range->offset = chunk_lo;
+ dst_range->sectors = chunk_hi - chunk_lo;
+}
+
+/*
+ * Translate the raw span [lo, hi) through llbitmap_map_layout() for the
+ * previous (old) or current (new) layout, then record both the resulting
+ * (offset, sectors) pair and the derived [start, end) bounds in @range.
+ */
+static void llbitmap_reshape_map_range(struct llbitmap *llbitmap,
+ sector_t lo, sector_t hi,
+ bool previous,
+ struct llbitmap_reshape_range *range)
+{
+ sector_t offset = lo;
+ unsigned long sectors = hi - lo;
+
+ llbitmap_map_layout(llbitmap, &offset, &sectors, previous);
+
+ range->offset = offset;
+ range->sectors = sectors;
+ range->start = offset;
+ range->end = offset + sectors;
+}
+
+/*
+ * Map the destination sub-range @dst back onto the proportional source
+ * sub-range of @old, scaled by how @dst sits inside @new.  The start
+ * rounds down and the end rounds up so the source range always fully
+ * covers the destination range; the end is additionally clamped to
+ * @old's end.  @new->sectors is non-zero here (checked by the caller,
+ * llbitmap_reshape_merge()).
+ *
+ * Returns true if the resulting source range is non-empty.
+ */
+static bool llbitmap_reshape_src_range(const struct llbitmap_reshape_range *old,
+ const struct llbitmap_reshape_range *new,
+ const struct llbitmap_reshape_range *dst,
+ struct llbitmap_reshape_range *src)
+{
+ /* The old layout maps no sectors: there is no source state to merge. */
+ if (!old->sectors)
+ return false;
+
+ src->start = old->offset +
+ mul_u64_u64_div_u64(dst->start - new->offset,
+ old->sectors, new->sectors);
+ src->end = old->offset +
+ mul_u64_u64_div_u64_roundup(dst->end - new->offset,
+ old->sectors, new->sectors);
+ if (src->end > old->end)
+ src->end = old->end;
+ src->offset = src->start;
+ src->sectors = src->end - src->start;
+
+ return src->sectors;
+}
+
+/*
+ * Fold every source bit overlapping @src (in units of the current
+ * chunksize) into @state via llbitmap_rmerge_state().  @src is non-empty
+ * here, so src->end - 1 cannot underflow.
+ */
+static enum llbitmap_state llbitmap_rmerge_src(struct llbitmap *llbitmap,
+ enum llbitmap_state state,
+ const struct llbitmap_reshape_range *src)
+{
+ unsigned long first = div64_u64(src->start, llbitmap->chunksize);
+ unsigned long last = div64_u64(src->end - 1, llbitmap->chunksize);
+ unsigned long i;
+
+ for (i = first; i <= last; i++)
+ state = llbitmap_rmerge_state(llbitmap, state,
+ llbitmap_read(llbitmap, i));
+
+ return state;
+}
+
+/*
+ * Merge the old-layout bitmap state described by @old into the new-layout
+ * destination chunks covered by @new.  Each destination chunk is seeded
+ * via llbitmap_reshape_init_dst(), combined with its proportional source
+ * bits (or with BitUnwritten when no source maps onto it), and written
+ * back with llbitmap_write().
+ */
+static void llbitmap_reshape_merge(struct llbitmap *llbitmap,
+ const struct llbitmap_reshape_range *old,
+ const struct llbitmap_reshape_range *new)
+{
+ unsigned long dst_start;
+ unsigned long dst_end;
+ unsigned long dst;
+
+ /* The new layout maps no sectors for this window: nothing to record. */
+ if (!new->sectors)
+ return;
+
+ dst_start = div64_u64(new->offset, llbitmap->reshape_chunksize);
+ dst_end = div64_u64(new->end - 1, llbitmap->reshape_chunksize);
+
+ for (dst = dst_start; dst <= dst_end; dst++) {
+ struct llbitmap_reshape_range dst_range;
+ struct llbitmap_reshape_range src;
+ enum llbitmap_state state;
+
+ llbitmap_reshape_dst_range(llbitmap, dst, new, &dst_range);
+ state = llbitmap_reshape_init_dst(llbitmap, dst, new);
+ if (llbitmap_reshape_src_range(old, new, &dst_range, &src))
+ state = llbitmap_rmerge_src(llbitmap, state, &src);
+ else
+ state = llbitmap_rmerge_state(llbitmap, state, BitUnwritten);
+ llbitmap_write(llbitmap, state, dst);
+ }
+}
+
static void llbitmap_reshape_finish(struct mddev *mddev)
{
struct llbitmap *llbitmap = mddev->bitmap;
@@ -1680,6 +1829,28 @@ static void llbitmap_reshape_finish(struct mddev *mddev)
mddev->pers->quiesce(mddev, 0);
}
+/*
+ * bitmap_operations::reshape_mark hook: remap the checkpointed bitmap
+ * state for the sectors swept as reshape_position moves from @old_pos to
+ * @new_pos (either direction of travel).
+ */
+static void llbitmap_reshape_mark(struct mddev *mddev, sector_t old_pos,
+ sector_t new_pos)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ struct llbitmap_reshape_range old;
+ struct llbitmap_reshape_range new;
+ sector_t lo;
+ sector_t hi;
+
+ if (!llbitmap || old_pos == new_pos)
+ return;
+
+ /* Order the endpoints so [lo, hi) covers the swept span. */
+ if (new_pos > old_pos) {
+ lo = old_pos;
+ hi = new_pos;
+ } else {
+ lo = new_pos;
+ hi = old_pos;
+ }
+ if (!hi)
+ return;
+
+ llbitmap_reshape_map_range(llbitmap, lo, hi, true, &old);
+ llbitmap_reshape_map_range(llbitmap, lo, hi, false, &new);
+ llbitmap_reshape_merge(llbitmap, &old, &new);
+}
+
static void llbitmap_write_sb(struct llbitmap *llbitmap)
{
int nr_blocks = DIV_ROUND_UP(BITMAP_DATA_OFFSET, llbitmap->io_size);
@@ -1937,6 +2108,7 @@ static struct bitmap_operations llbitmap_ops = {
.prepare_range = llbitmap_prepare_range,
.reshape_finish = llbitmap_reshape_finish,
.reshape_can_start = llbitmap_reshape_can_start,
+ .reshape_mark = llbitmap_reshape_mark,
.write_all = llbitmap_write_all,
.group = &md_llbitmap_group,
--
2.51.0
next prev parent reply other threads:[~2026-04-19 3:10 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-19 3:09 [PATCH 00/19] md: support llbitmap reshape for raid10 and raid5 Yu Kuai
2026-04-19 3:09 ` [PATCH] md: add exact bitmap mapping and reshape hooks Yu Kuai
2026-04-19 3:09 ` [PATCH] md: add helper to split bios at reshape offset Yu Kuai
2026-04-19 3:09 ` [PATCH] md/md-llbitmap: track bitmap sync_size explicitly Yu Kuai
2026-04-19 3:09 ` [PATCH] md/md-llbitmap: allocate page controls independently Yu Kuai
2026-04-19 3:09 ` [PATCH] md/md-llbitmap: grow the page cache in place for reshape Yu Kuai
2026-04-19 3:09 ` [PATCH] md/md-llbitmap: track target reshape geometry fields Yu Kuai
2026-04-19 3:09 ` [PATCH] md/md-llbitmap: finish reshape geometry Yu Kuai
2026-04-19 3:09 ` [PATCH] md/md-llbitmap: refuse reshape while llbitmap still needs sync Yu Kuai
2026-04-19 3:09 ` [PATCH] md/md-llbitmap: add reshape range mapping helpers Yu Kuai
2026-04-19 3:09 ` [PATCH] md/md-llbitmap: don't skip reshape ranges from bitmap state Yu Kuai
2026-04-19 3:09 ` Yu Kuai [this message]
2026-04-19 3:09 ` [PATCH] md/md-llbitmap: clamp state-machine walks to tracked bits Yu Kuai
2026-04-19 3:09 ` [PATCH] md/raid10: reject llbitmap chunk shrink during reshape Yu Kuai
2026-04-19 3:09 ` [PATCH] md/raid10: wire llbitmap reshape lifecycle Yu Kuai
2026-04-30 2:37 ` kernel test robot
2026-04-19 3:09 ` [PATCH] md/raid10: split reshape bios before bitmap accounting Yu Kuai
2026-04-19 3:09 ` [PATCH] md/raid5: add exact old and new llbitmap mapping helpers Yu Kuai
2026-05-01 18:51 ` kernel test robot
2026-04-19 3:09 ` [PATCH] md/raid5: reject llbitmap chunk shrink during reshape Yu Kuai
2026-04-19 3:09 ` [PATCH] md/raid5: wire llbitmap reshape lifecycle Yu Kuai
2026-04-19 3:09 ` [PATCH] md/raid5: split reshape bios before bitmap accounting Yu Kuai
2026-04-30 0:59 ` kernel test robot
2026-04-30 4:07 ` kernel test robot
2026-04-30 19:48 ` kernel test robot
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260419030942.824195-12-yukuai@fnnas.com \
--to=yukuai@fnnas.com \
--cc=chencheng@fnnas.com \
--cc=linan122@huawei.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-raid@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox