* [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks
@ 2025-12-31 7:09 Zheng Qixing
2025-12-31 7:09 ` [RFC PATCH 1/5] md: add helpers for requested sync action Zheng Qixing
` (5 more replies)
0 siblings, 6 replies; 21+ messages in thread
From: Zheng Qixing @ 2025-12-31 7:09 UTC (permalink / raw)
To: song, yukuai
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1,
zhengqixing, linan122
From: Zheng Qixing <zhengqixing@huawei.com>
In RAID1, some sectors may be marked as bad blocks due to I/O errors.
In certain scenarios, these bad blocks might not be permanent, and
issuing I/Os again could succeed.
To address this situation, a new sync action ('rectify') is introduced
into RAID1, allowing users to actively trigger the repair of existing
bad blocks and clear them in sysfs bad_blocks.
When 'rectify' is echoed into /sys/block/md*/md/sync_action, a healthy
disk is selected from the array; data is read from it and then written
to the disk where the bad block is located. If the write request
succeeds, the bad block record can be cleared.
Note:
This patchset depends on [1] from Li Nan which is currently under review
and not yet merged into md-6.19.
[1] [PATCH v3 00/13] cleanup and bugfix of sync
Link: https://lore.kernel.org/all/20251215030444.1318434-1-linan666@huaweicloud.com/
Zheng Qixing (5):
md: add helpers for requested sync action
md: clear stale sync flags when frozen before sync starts
md: simplify sync action print in status_resync
md: introduce MAX_RAID_DISKS macro to replace magic number
md/raid1: introduce rectify action to repair badblocks
drivers/md/md.c | 184 ++++++++++++++++++++++-----
drivers/md/md.h | 17 +++
drivers/md/raid1.c | 308 ++++++++++++++++++++++++++++++++++++++++++++-
drivers/md/raid1.h | 1 +
4 files changed, 472 insertions(+), 38 deletions(-)
--
2.39.2
^ permalink raw reply [flat|nested] 21+ messages in thread
* [RFC PATCH 1/5] md: add helpers for requested sync action
2025-12-31 7:09 [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks Zheng Qixing
@ 2025-12-31 7:09 ` Zheng Qixing
2026-01-06 12:59 ` Li Nan
2025-12-31 7:09 ` [RFC PATCH 2/5] md: clear stale sync flags when frozen before sync starts Zheng Qixing
` (4 subsequent siblings)
5 siblings, 1 reply; 21+ messages in thread
From: Zheng Qixing @ 2025-12-31 7:09 UTC (permalink / raw)
To: song, yukuai
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1,
zhengqixing, linan122
From: Zheng Qixing <zhengqixing@huawei.com>
Add helpers for handling requested sync action.
In handle_requested_sync_action(), add mutual exclusivity checks between
check/repair operations. This prevents the scenario where one operation
is requested, but before MD_RECOVERY_RUNNING is set, another operation is
requested, resulting in neither an EBUSY return nor proper execution of
the second operation.
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
---
drivers/md/md.c | 87 +++++++++++++++++++++++++++++++++++++------------
1 file changed, 66 insertions(+), 21 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5df2220b1bd1..ccaa2e6fe079 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -665,6 +665,59 @@ void mddev_put(struct mddev *mddev)
spin_unlock(&all_mddevs_lock);
}
+static int __handle_requested_sync_action(struct mddev *mddev,
+ enum sync_action action)
+{
+ switch (action) {
+ case ACTION_CHECK:
+ set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ fallthrough;
+ case ACTION_REPAIR:
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int handle_requested_sync_action(struct mddev *mddev,
+ enum sync_action action)
+{
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ return -EBUSY;
+ return __handle_requested_sync_action(mddev, action);
+}
+
+static enum sync_action __get_recovery_sync_action(struct mddev *mddev)
+{
+ if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+ return ACTION_CHECK;
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ return ACTION_REPAIR;
+ return ACTION_RESYNC;
+}
+
+static enum sync_action get_recovery_sync_action(struct mddev *mddev)
+{
+ return __get_recovery_sync_action(mddev);
+}
+
+static void init_recovery_position(struct mddev *mddev)
+{
+ mddev->resync_min = 0;
+}
+
+static void set_requested_position(struct mddev *mddev, sector_t value)
+{
+ mddev->resync_min = value;
+}
+
+static sector_t get_requested_position(struct mddev *mddev)
+{
+ return mddev->resync_min;
+}
+
static void md_safemode_timeout(struct timer_list *t);
static void md_start_sync(struct work_struct *ws);
@@ -781,7 +834,7 @@ int mddev_init(struct mddev *mddev)
mddev->reshape_position = MaxSector;
mddev->reshape_backwards = 0;
mddev->last_sync_action = ACTION_IDLE;
- mddev->resync_min = 0;
+ init_recovery_position(mddev);
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
@@ -5101,17 +5154,9 @@ enum sync_action md_sync_action(struct mddev *mddev)
if (test_bit(MD_RECOVERY_RECOVER, &recovery))
return ACTION_RECOVER;
- if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
- /*
- * MD_RECOVERY_CHECK must be paired with
- * MD_RECOVERY_REQUESTED.
- */
- if (test_bit(MD_RECOVERY_CHECK, &recovery))
- return ACTION_CHECK;
- if (test_bit(MD_RECOVERY_REQUESTED, &recovery))
- return ACTION_REPAIR;
- return ACTION_RESYNC;
- }
+ /* MD_RECOVERY_CHECK must be paired with MD_RECOVERY_REQUESTED. */
+ if (test_bit(MD_RECOVERY_SYNC, &recovery))
+ return get_recovery_sync_action(mddev);
/*
* MD_RECOVERY_NEEDED or MD_RECOVERY_RUNNING is set, however, no
@@ -5300,11 +5345,10 @@ action_store(struct mddev *mddev, const char *page, size_t len)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
break;
case ACTION_CHECK:
- set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
- fallthrough;
case ACTION_REPAIR:
- set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
- set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ ret = handle_requested_sync_action(mddev, action);
+ if (ret)
+ goto out;
fallthrough;
case ACTION_RESYNC:
case ACTION_IDLE:
@@ -6783,7 +6827,7 @@ static void md_clean(struct mddev *mddev)
mddev->dev_sectors = 0;
mddev->raid_disks = 0;
mddev->resync_offset = 0;
- mddev->resync_min = 0;
+ init_recovery_position(mddev);
mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
/* we still need mddev->external in export_rdev, do not clear it yet */
@@ -9370,7 +9414,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
switch (action) {
case ACTION_CHECK:
case ACTION_REPAIR:
- return mddev->resync_min;
+ return get_requested_position(mddev);
case ACTION_RESYNC:
if (!mddev->bitmap)
return mddev->resync_offset;
@@ -9795,10 +9839,11 @@ void md_do_sync(struct md_thread *thread)
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* We completed so min/max setting can be forgotten if used. */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
- mddev->resync_min = 0;
+ set_requested_position(mddev, 0);
mddev->resync_max = MaxSector;
- } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
- mddev->resync_min = mddev->curr_resync_completed;
+ } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ set_requested_position(mddev, mddev->curr_resync_completed);
+ }
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
mddev->curr_resync = MD_RESYNC_NONE;
spin_unlock(&mddev->lock);
--
2.39.2
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [RFC PATCH 2/5] md: clear stale sync flags when frozen before sync starts
2025-12-31 7:09 [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks Zheng Qixing
2025-12-31 7:09 ` [RFC PATCH 1/5] md: add helpers for requested sync action Zheng Qixing
@ 2025-12-31 7:09 ` Zheng Qixing
2026-01-06 13:07 ` Li Nan
2025-12-31 7:09 ` [RFC PATCH 3/5] md: simplify sync action print in status_resync Zheng Qixing
` (3 subsequent siblings)
5 siblings, 1 reply; 21+ messages in thread
From: Zheng Qixing @ 2025-12-31 7:09 UTC (permalink / raw)
To: song, yukuai
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1,
zhengqixing, linan122
From: Zheng Qixing <zhengqixing@huawei.com>
In md_check_recovery(), add clearing of all sync flags when sync is not
running. This fixes the issue where a sync operation is requested, then
'frozen' is executed before MD_RECOVERY_RUNNING is set, leaving stale
operation flags that cause subsequent operations to fail with EBUSY.
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
---
drivers/md/md.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ccaa2e6fe079..52e09a9a9288 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -10336,6 +10336,9 @@ void md_check_recovery(struct mddev *mddev)
queue_work(md_misc_wq, &mddev->sync_work);
} else {
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
wake_up(&resync_wait);
}
--
2.39.2
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [RFC PATCH 3/5] md: simplify sync action print in status_resync
2025-12-31 7:09 [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks Zheng Qixing
2025-12-31 7:09 ` [RFC PATCH 1/5] md: add helpers for requested sync action Zheng Qixing
2025-12-31 7:09 ` [RFC PATCH 2/5] md: clear stale sync flags when frozen before sync starts Zheng Qixing
@ 2025-12-31 7:09 ` Zheng Qixing
2026-01-06 13:24 ` Li Nan
2025-12-31 7:09 ` [RFC PATCH 4/5] md: introduce MAX_RAID_DISKS macro to replace magic number Zheng Qixing
` (2 subsequent siblings)
5 siblings, 1 reply; 21+ messages in thread
From: Zheng Qixing @ 2025-12-31 7:09 UTC (permalink / raw)
To: song, yukuai
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1,
zhengqixing, linan122
From: Zheng Qixing <zhengqixing@huawei.com>
No functional change, just code cleanup to make it easier to add new
sync actions later.
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
---
drivers/md/md.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 52e09a9a9288..9eeab5258189 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8684,6 +8684,8 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
sector_t rt, curr_mark_cnt, resync_mark_cnt;
int scale, recovery_active;
unsigned int per_milli;
+ enum sync_action action;
+ const char *sync_action_name;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
@@ -8765,13 +8767,13 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, ".");
seq_printf(seq, "] ");
}
- seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
- (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
- "reshape" :
- (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
- "check" :
- (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
- "resync" : "recovery"))),
+
+ action = md_sync_action(mddev);
+ if (action == ACTION_RECOVER)
+ sync_action_name = "recovery";
+ else
+ sync_action_name = md_sync_action_name(action);
+ seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", sync_action_name,
per_milli/10, per_milli % 10,
(unsigned long long) resync/2,
(unsigned long long) max_sectors/2);
--
2.39.2
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [RFC PATCH 4/5] md: introduce MAX_RAID_DISKS macro to replace magic number
2025-12-31 7:09 [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks Zheng Qixing
` (2 preceding siblings ...)
2025-12-31 7:09 ` [RFC PATCH 3/5] md: simplify sync action print in status_resync Zheng Qixing
@ 2025-12-31 7:09 ` Zheng Qixing
2025-12-31 18:00 ` John Stoffel
2025-12-31 7:09 ` [RFC PATCH 5/5] md/raid1: introduce rectify action to repair badblocks Zheng Qixing
2025-12-31 11:11 ` [RFC PATCH 0/5] md/raid1: introduce a new sync " Roman Mamedov
5 siblings, 1 reply; 21+ messages in thread
From: Zheng Qixing @ 2025-12-31 7:09 UTC (permalink / raw)
To: song, yukuai
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1,
zhengqixing, linan122
From: Zheng Qixing <zhengqixing@huawei.com>
Define MAX_RAID_DISKS macro for the maximum number of RAID disks.
No functional change.
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
---
drivers/md/md.c | 4 ++--
drivers/md/md.h | 1 +
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9eeab5258189..d2f136706f6c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1888,7 +1888,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
sb->major_version != cpu_to_le32(1) ||
- le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
+ le32_to_cpu(sb->max_dev) > MAX_RAID_DISKS ||
le64_to_cpu(sb->super_offset) != rdev->sb_start ||
(le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
return -EINVAL;
@@ -2065,7 +2065,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
mddev->resync_offset = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
- mddev->max_disks = (4096-256)/2;
+ mddev->max_disks = MAX_RAID_DISKS;
if (!mddev->logical_block_size)
mddev->logical_block_size = le32_to_cpu(sb->logical_block_size);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index a083f37374d0..6a4af4a1959c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -22,6 +22,7 @@
#include <trace/events/block.h>
#define MaxSector (~(sector_t)0)
+#define MAX_RAID_DISKS ((4096-256)/2)
enum md_submodule_type {
MD_PERSONALITY = 0,
--
2.39.2
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [RFC PATCH 5/5] md/raid1: introduce rectify action to repair badblocks
2025-12-31 7:09 [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks Zheng Qixing
` (3 preceding siblings ...)
2025-12-31 7:09 ` [RFC PATCH 4/5] md: introduce MAX_RAID_DISKS macro to replace magic number Zheng Qixing
@ 2025-12-31 7:09 ` Zheng Qixing
2026-01-06 13:43 ` Li Nan
2026-01-14 3:11 ` Li Nan
2025-12-31 11:11 ` [RFC PATCH 0/5] md/raid1: introduce a new sync " Roman Mamedov
5 siblings, 2 replies; 21+ messages in thread
From: Zheng Qixing @ 2025-12-31 7:09 UTC (permalink / raw)
To: song, yukuai
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1,
zhengqixing, linan122
From: Zheng Qixing <zhengqixing@huawei.com>
Add support for repairing known bad blocks in RAID1. When disks
have known bad blocks (shown in sysfs bad_blocks), data can be
read from other healthy disks in the array and written to repair
the bad block areas and clear them from bad_blocks.
Echoing 'rectify' into sync_action can trigger this action.
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
---
drivers/md/md.c | 80 ++++++++++--
drivers/md/md.h | 16 +++
drivers/md/raid1.c | 308 ++++++++++++++++++++++++++++++++++++++++++++-
drivers/md/raid1.h | 1 +
4 files changed, 394 insertions(+), 11 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d2f136706f6c..f5844cfb78fc 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -74,6 +74,7 @@ static const char *action_name[NR_SYNC_ACTIONS] = {
[ACTION_RECOVER] = "recover",
[ACTION_CHECK] = "check",
[ACTION_REPAIR] = "repair",
+ [ACTION_RECTIFY] = "rectify",
[ACTION_RESHAPE] = "reshape",
[ACTION_FROZEN] = "frozen",
[ACTION_IDLE] = "idle",
@@ -665,6 +666,29 @@ void mddev_put(struct mddev *mddev)
spin_unlock(&all_mddevs_lock);
}
+static int raid1_badblocks_precheck(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+ int valid_disks = 0;
+ int ret = -EINVAL;
+
+ if (mddev->level != 1) {
+ pr_err("md/raid1:%s requires raid1 array\n", mdname(mddev));
+ return -EINVAL;
+ }
+
+ rdev_for_each(rdev, mddev) {
+ if (rdev->raid_disk < 0 ||
+ test_bit(Faulty, &rdev->flags))
+ continue;
+ valid_disks++;
+ }
+ if (valid_disks >= 2)
+ ret = 0;
+
+ return ret;
+}
+
static int __handle_requested_sync_action(struct mddev *mddev,
enum sync_action action)
{
@@ -684,9 +708,23 @@ static int __handle_requested_sync_action(struct mddev *mddev,
static int handle_requested_sync_action(struct mddev *mddev,
enum sync_action action)
{
+ int ret;
+
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
return -EBUSY;
- return __handle_requested_sync_action(mddev, action);
+
+ switch (action) {
+ case ACTION_RECTIFY:
+ ret = raid1_badblocks_precheck(mddev);
+ if (ret)
+ return ret;
+ set_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery);
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ return 0;
+ default:
+ return __handle_requested_sync_action(mddev, action);
+ }
}
static enum sync_action __get_recovery_sync_action(struct mddev *mddev)
@@ -700,21 +738,34 @@ static enum sync_action __get_recovery_sync_action(struct mddev *mddev)
static enum sync_action get_recovery_sync_action(struct mddev *mddev)
{
+ if (test_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery))
+ return ACTION_RECTIFY;
return __get_recovery_sync_action(mddev);
}
static void init_recovery_position(struct mddev *mddev)
{
mddev->resync_min = 0;
+ mddev->rectify_min = 0;
+}
+
+static inline void clear_badblock_md_flags(struct mddev *mddev)
+{
+ clear_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery);
}
static void set_requested_position(struct mddev *mddev, sector_t value)
{
- mddev->resync_min = value;
+ if (test_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery))
+ mddev->rectify_min = value;
+ else
+ mddev->resync_min = value;
}
static sector_t get_requested_position(struct mddev *mddev)
{
+ if (test_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery))
+ return mddev->rectify_min;
return mddev->resync_min;
}
@@ -5154,7 +5205,10 @@ enum sync_action md_sync_action(struct mddev *mddev)
if (test_bit(MD_RECOVERY_RECOVER, &recovery))
return ACTION_RECOVER;
- /* MD_RECOVERY_CHECK must be paired with MD_RECOVERY_REQUESTED. */
+ /*
+ * MD_RECOVERY_CHECK/MD_RECOVERY_BADBLOCKS_RECTIFY must be
+ * paired with MD_RECOVERY_REQUESTED.
+ */
if (test_bit(MD_RECOVERY_SYNC, &recovery))
return get_recovery_sync_action(mddev);
@@ -5319,6 +5373,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
break;
case ACTION_RESHAPE:
case ACTION_RECOVER:
+ case ACTION_RECTIFY:
case ACTION_CHECK:
case ACTION_REPAIR:
case ACTION_RESYNC:
@@ -5344,6 +5399,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
break;
+ case ACTION_RECTIFY:
case ACTION_CHECK:
case ACTION_REPAIR:
ret = handle_requested_sync_action(mddev, action);
@@ -9362,6 +9418,7 @@ static sector_t md_sync_max_sectors(struct mddev *mddev,
{
switch (action) {
case ACTION_RESYNC:
+ case ACTION_RECTIFY:
case ACTION_CHECK:
case ACTION_REPAIR:
atomic64_set(&mddev->resync_mismatches, 0);
@@ -9414,6 +9471,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
struct md_rdev *rdev;
switch (action) {
+ case ACTION_RECTIFY:
case ACTION_CHECK:
case ACTION_REPAIR:
return get_requested_position(mddev);
@@ -10039,6 +10097,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ clear_badblock_md_flags(mddev);
/* Start new recovery. */
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
@@ -10096,10 +10155,14 @@ static void md_start_sync(struct work_struct *ws)
if (spares && md_bitmap_enabled(mddev, true))
mddev->bitmap_ops->write_all(mddev);
- name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
- "reshape" : "resync";
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, name));
+ if (!is_badblocks_recovery_requested(mddev) ||
+ !raid1_badblocks_precheck(mddev)) {
+ name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
+ "reshape" : "resync";
+ rcu_assign_pointer(mddev->sync_thread,
+ md_register_thread(md_do_sync, mddev, name));
+ }
+
if (!mddev->sync_thread) {
pr_warn("%s: could not start resync thread...\n",
mdname(mddev));
@@ -10127,6 +10190,7 @@ static void md_start_sync(struct work_struct *ws)
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ clear_badblock_md_flags(mddev);
mddev_unlock(mddev);
/*
* md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
@@ -10341,6 +10405,7 @@ void md_check_recovery(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_badblock_md_flags(mddev);
wake_up(&resync_wait);
}
@@ -10391,6 +10456,7 @@ void md_reap_sync_thread(struct mddev *mddev)
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_badblock_md_flags(mddev);
clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
/*
* We call mddev->cluster_ops->update_size here because sync_size could
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 6a4af4a1959c..58f320b19bba 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -98,6 +98,13 @@ enum sync_action {
* are inconsistent data,
*/
ACTION_REPAIR,
+ /*
+ * Represent by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED |
+ * MD_RECOVERY_BADBLOCKS_RECTIFY, start when user echo "rectify"
+ * to sysfs api sync_action, used to repair the badblocks acked
+ * in bad table;
+ */
+ ACTION_RECTIFY,
/*
* Represent by MD_RECOVERY_RESHAPE, start when new member disk is added
* to the conf, notice that this is different from spares or
@@ -524,6 +531,7 @@ struct mddev {
sector_t resync_offset;
sector_t resync_min; /* user requested sync
* starts here */
+ sector_t rectify_min;
sector_t resync_max; /* resync should pause
* when it gets here */
@@ -664,6 +672,8 @@ enum recovery_flags {
MD_RESYNCING_REMOTE,
/* raid456 lazy initial recover */
MD_RECOVERY_LAZY_RECOVER,
+ /* try to repair acked badblocks*/
+ MD_RECOVERY_BADBLOCKS_RECTIFY,
};
enum md_ro_state {
@@ -1016,6 +1026,12 @@ static inline void mddev_unlock_and_resume(struct mddev *mddev)
mddev_resume(mddev);
}
+static inline bool is_badblocks_recovery_requested(struct mddev *mddev)
+{
+ return test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
+ test_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery);
+}
+
struct mdu_array_info_s;
struct mdu_disk_info_s;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 00120c86c443..f304161bc0ce 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -176,7 +176,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* If this is a user-requested check/repair, allocate
* RESYNC_PAGES for each bio.
*/
- if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery))
+ if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery) &&
+ !is_badblocks_recovery_requested(conf->mddev))
need_pages = conf->raid_disks * 2;
else
need_pages = 1;
@@ -2380,6 +2381,301 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
put_sync_write_buf(r1_bio);
}
+static void end_rectify_read(struct bio *bio)
+{
+ struct r1bio *r1_bio = get_resync_r1bio(bio);
+ struct r1conf *conf = r1_bio->mddev->private;
+ struct md_rdev *rdev;
+ struct bio *next_bio;
+ bool all_fail = true;
+ int i;
+
+ update_head_pos(r1_bio->read_disk, r1_bio);
+
+ if (!bio->bi_status) {
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
+ goto out;
+ }
+
+ for (i = r1_bio->read_disk + 1; i < conf->raid_disks; i++) {
+ rdev = conf->mirrors[i].rdev;
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ continue;
+
+ next_bio = r1_bio->bios[i];
+ if (next_bio->bi_end_io == end_rectify_read) {
+ next_bio->bi_opf &= ~MD_FAILFAST;
+ r1_bio->read_disk = i;
+ all_fail = false;
+ break;
+ }
+ }
+
+ if (unlikely(all_fail)) {
+ md_done_sync(r1_bio->mddev, r1_bio->sectors);
+ md_sync_error(r1_bio->mddev);
+ put_buf(r1_bio);
+ return;
+ }
+out:
+ reschedule_retry(r1_bio);
+}
+
+static void end_rectify_write(struct bio *bio)
+{
+ struct r1bio *r1_bio = get_resync_r1bio(bio);
+
+ if (atomic_dec_and_test(&r1_bio->remaining)) {
+ /*
+ * Rectify only attempts to clear acked bad
+ * blocks, and it does not set bad blocks in
+ * cases of R1BIO_WriteError.
+ * Here we reuse R1BIO_MadeGood flag, which
+ * does not guarantee that all write I/Os
+ * actually succeeded.
+ */
+ set_bit(R1BIO_MadeGood, &r1_bio->state);
+ reschedule_retry(r1_bio);
+ }
+}
+
+static void submit_rectify_read(struct r1bio *r1_bio)
+{
+ struct bio *bio;
+
+ bio = r1_bio->bios[r1_bio->read_disk];
+ bio->bi_opf &= ~MD_FAILFAST;
+ bio->bi_status = 0;
+ submit_bio_noacct(bio);
+}
+
+static void rectify_request_write(struct mddev *mddev, struct r1bio *r1_bio)
+{
+ struct r1conf *conf = mddev->private;
+ struct bio *wbio = NULL;
+ int wcnt = 0;
+ int i;
+
+ if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
+ submit_rectify_read(r1_bio);
+ return;
+ }
+
+ atomic_set(&r1_bio->remaining, 0);
+ for (i = 0; i < conf->raid_disks; i++) {
+ wbio = r1_bio->bios[i];
+ if (wbio->bi_end_io == end_rectify_write) {
+ atomic_inc(&r1_bio->remaining);
+ wcnt++;
+ submit_bio_noacct(wbio);
+ }
+ }
+
+ if (unlikely(!wcnt)) {
+ md_done_sync(r1_bio->mddev, r1_bio->sectors);
+ put_buf(r1_bio);
+ }
+}
+
+static void handle_rectify_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
+{
+ struct md_rdev *rdev;
+ struct bio *bio;
+ int i;
+
+ for (i = 0; i < conf->raid_disks; i++) {
+ rdev = conf->mirrors[i].rdev;
+ bio = r1_bio->bios[i];
+ if (bio->bi_end_io == NULL)
+ continue;
+
+ if (!bio->bi_status && bio->bi_end_io == end_rectify_write &&
+ test_bit(R1BIO_MadeGood, &r1_bio->state)) {
+ rdev_clear_badblocks(rdev, r1_bio->sector,
+ r1_bio->sectors, 0);
+ }
+ }
+
+ md_done_sync(r1_bio->mddev, r1_bio->sectors);
+ put_buf(r1_bio);
+}
+
+static void handle_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
+{
+ if (test_bit(R1BIO_BadBlocksRectify, &r1_bio->state))
+ rectify_request_write(mddev, r1_bio);
+ else
+ sync_request_write(mddev, r1_bio);
+}
+
+static void __handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio);
+static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
+{
+ if (test_bit(R1BIO_BadBlocksRectify, &r1_bio->state))
+ handle_rectify_write_finished(conf, r1_bio);
+ else
+ __handle_sync_write_finished(conf, r1_bio);
+}
+
+static sector_t get_badblocks_sync_sectors(struct mddev *mddev, sector_t sector_nr,
+ int *skipped, unsigned long *bad_disks)
+{
+ struct r1conf *conf = mddev->private;
+ sector_t nr_sectors = mddev->dev_sectors - sector_nr;
+ bool all_faulty = true;
+ struct md_rdev *rdev;
+ bool good = false;
+ int i;
+
+ *skipped = 0;
+ for (i = 0; i < conf->raid_disks; i++) {
+ sector_t first_bad;
+ sector_t bad_sectors;
+
+ rdev = conf->mirrors[i].rdev;
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ continue;
+
+ all_faulty = false;
+ if (is_badblock(rdev, sector_nr, nr_sectors, &first_bad, &bad_sectors)) {
+ if (first_bad <= sector_nr) {
+ set_bit(i, bad_disks);
+ nr_sectors = min(nr_sectors, first_bad + bad_sectors - sector_nr);
+ } else {
+ good = true;
+ nr_sectors = min(nr_sectors, first_bad - sector_nr);
+ }
+ } else {
+ good = true;
+ }
+ }
+
+ if (all_faulty) {
+ *skipped = 1;
+ return 0;
+ }
+
+ if (!good || !bitmap_weight(bad_disks, conf->raid_disks))
+ *skipped = 1;
+
+ /* make sure nr_sectors won't go across barrier unit boundary */
+ return align_to_barrier_unit_end(sector_nr, nr_sectors);
+}
+
+static sector_t get_next_sync_sector(struct mddev *mddev, sector_t sector_nr,
+ int *skipped, unsigned long *bad_disks)
+{
+ sector_t nr_sectors;
+
+ nr_sectors = get_badblocks_sync_sectors(mddev, sector_nr,
+ skipped, bad_disks);
+ if (!(*skipped) && nr_sectors > RESYNC_PAGES * (PAGE_SIZE >> 9))
+ nr_sectors = RESYNC_PAGES * (PAGE_SIZE >> 9);
+ return nr_sectors;
+}
+
+static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf);
+static struct r1bio *init_sync_badblocks_r1bio(struct mddev *mddev,
+ sector_t sector_nr,
+ sector_t nr_sectors,
+ unsigned long *bad_disks)
+{
+ struct r1conf *conf = mddev->private;
+ struct r1bio *r1_bio;
+ struct md_rdev *rdev;
+ int page_idx = 0;
+ struct bio *bio;
+ int i;
+
+ r1_bio = raid1_alloc_init_r1buf(conf);
+ r1_bio->mddev = mddev;
+ r1_bio->sector = sector_nr;
+ r1_bio->sectors = nr_sectors;
+ r1_bio->state = 0;
+ r1_bio->read_disk = -1;
+ set_bit(R1BIO_IsSync, &r1_bio->state);
+ set_bit(R1BIO_BadBlocksRectify, &r1_bio->state);
+
+ for (i = 0; i < conf->raid_disks; i++) {
+ rdev = conf->mirrors[i].rdev;
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ continue;
+
+ if (r1_bio->read_disk < 0 && !test_bit(i, bad_disks))
+ r1_bio->read_disk = i;
+
+ bio = r1_bio->bios[i];
+ if (test_bit(i, bad_disks)) {
+ bio->bi_opf = REQ_OP_WRITE;
+ bio->bi_end_io = end_rectify_write;
+ } else {
+ bio->bi_opf = REQ_OP_READ;
+ bio->bi_end_io = end_rectify_read;
+ }
+
+ atomic_inc(&rdev->nr_pending);
+ bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
+ bio_set_dev(bio, rdev->bdev);
+ if (test_bit(FailFast, &rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
+ }
+
+ if (unlikely(r1_bio->read_disk < 0)) {
+ put_buf(r1_bio);
+ return NULL;
+ }
+
+ while (nr_sectors > 0 && page_idx < RESYNC_PAGES) {
+ int len = nr_sectors << 9 < PAGE_SIZE ?
+ nr_sectors << 9 : PAGE_SIZE;
+ struct resync_pages *rp;
+
+ for (i = 0; i < conf->raid_disks; i++) {
+ bio = r1_bio->bios[i];
+ rp = get_resync_pages(bio);
+ __bio_add_page(bio, resync_fetch_page(rp, page_idx), len, 0);
+ }
+
+ nr_sectors -= len >> 9;
+ page_idx++;
+ }
+
+ return r1_bio;
+}
+
+static sector_t __badblocks_rectify(struct mddev *mddev, sector_t sector_nr,
+ sector_t nr_sectors,
+ unsigned long *bad_disks)
+{
+ struct r1bio *r1_bio;
+
+ r1_bio = init_sync_badblocks_r1bio(mddev, sector_nr,
+ nr_sectors, bad_disks);
+ if (!r1_bio)
+ return 0;
+
+ submit_rectify_read(r1_bio);
+ return nr_sectors;
+}
+
+static sector_t do_sync_badblocks_rectify(struct mddev *mddev,
+ sector_t sector_nr, int *skipped)
+{
+ DECLARE_BITMAP(bad_disks, MAX_RAID_DISKS);
+ struct r1conf *conf = mddev->private;
+ sector_t nr_sectors;
+
+ bitmap_zero(bad_disks, MAX_RAID_DISKS);
+ nr_sectors = get_next_sync_sector(mddev, sector_nr, skipped, bad_disks);
+
+ if (*skipped) {
+ lower_barrier(conf, sector_nr);
+ return nr_sectors;
+ }
+
+ return __badblocks_rectify(mddev, sector_nr, nr_sectors, bad_disks);
+}
+
/*
* This is a kernel thread which:
*
@@ -2554,7 +2850,7 @@ static bool narrow_write_error(struct r1bio *r1_bio, int i)
return ok;
}
-static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
+static void __handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
int m;
int s = r1_bio->sectors;
@@ -2728,7 +3024,7 @@ static void raid1d(struct md_thread *thread)
test_bit(R1BIO_WriteError, &r1_bio->state))
handle_sync_write_finished(conf, r1_bio);
else
- sync_request_write(mddev, r1_bio);
+ handle_sync_write(mddev, r1_bio);
} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
test_bit(R1BIO_WriteError, &r1_bio->state))
handle_write_finished(conf, r1_bio);
@@ -2837,7 +3133,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
/* before building a request, check if we can skip these blocks..
* This call the bitmap_start_sync doesn't actually record anything
*/
- if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
+ if (!is_badblocks_recovery_requested(mddev) &&
+ !md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */
*skipped = 1;
@@ -2863,6 +3160,9 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (raise_barrier(conf, sector_nr))
return 0;
+ if (is_badblocks_recovery_requested(mddev))
+ return do_sync_badblocks_rectify(mddev, sector_nr, skipped);
+
r1_bio = raid1_alloc_init_r1buf(conf);
/*
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index c98d43a7ae99..6ca8bf808d69 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -184,6 +184,7 @@ enum r1bio_state {
R1BIO_MadeGood,
R1BIO_WriteError,
R1BIO_FailFast,
+ R1BIO_BadBlocksRectify,
};
static inline int sector_to_idx(sector_t sector)
--
2.39.2
^ permalink raw reply related [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks
2025-12-31 7:09 [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks Zheng Qixing
` (4 preceding siblings ...)
2025-12-31 7:09 ` [RFC PATCH 5/5] md/raid1: introduce rectify action to repair badblocks Zheng Qixing
@ 2025-12-31 11:11 ` Roman Mamedov
2026-01-06 2:44 ` Zheng Qixing
5 siblings, 1 reply; 21+ messages in thread
From: Roman Mamedov @ 2025-12-31 11:11 UTC (permalink / raw)
To: Zheng Qixing
Cc: song, yukuai, linux-raid, linux-kernel, yi.zhang, yangerkun,
houtao1, zhengqixing, linan122
On Wed, 31 Dec 2025 15:09:47 +0800
Zheng Qixing <zhengqixing@huaweicloud.com> wrote:
> From: Zheng Qixing <zhengqixing@huawei.com>
>
> In RAID1, some sectors may be marked as bad blocks due to I/O errors.
> In certain scenarios, these bad blocks might not be permanent, and
> issuing I/Os again could succeed.
>
> To address this situation, a new sync action ('rectify') introduced
> into RAID1 , allowing users to actively trigger the repair of existing
> bad blocks and clear it in sys bad_blocks.
>
> When echo rectify into /sys/block/md*/md/sync_action, a healthy disk is
> selected from the array to read data and then writes it to the disk where
> the bad block is located. If the write request succeeds, the bad block
> record can be cleared.
Could you also check here that it reads back successfully, and only then clear?
Otherwise there are cases when the block won't read even after rewriting it.
Side note, on some hardware it might be necessary to rewrite a larger area
around the problematic block, to finally trigger a remap. Not 512B, but at
least the native sector size, which is often 4K.
--
With respect,
Roman
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 4/5] md: introduce MAX_RAID_DISKS macro to replace magic number
2025-12-31 7:09 ` [RFC PATCH 4/5] md: introduce MAX_RAID_DISKS macro to replace magic number Zheng Qixing
@ 2025-12-31 18:00 ` John Stoffel
2026-01-04 2:06 ` Zheng Qixing
0 siblings, 1 reply; 21+ messages in thread
From: John Stoffel @ 2025-12-31 18:00 UTC (permalink / raw)
To: Zheng Qixing
Cc: song, yukuai, linux-raid, linux-kernel, yi.zhang, yangerkun,
houtao1, zhengqixing, linan122
>>>>> "Zheng" == Zheng Qixing <zhengqixing@huaweicloud.com> writes:
> From: Zheng Qixing <zhengqixing@huawei.com>
> Define MAX_RAID_DISKS macro for the maximum number of RAID disks.
> No functional change.
> Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
> ---
> drivers/md/md.c | 4 ++--
> drivers/md/md.h | 1 +
> 2 files changed, 3 insertions(+), 2 deletions(-)
> diff --git a/drivers/md/md.c b/drivers/md/md.c
> index 9eeab5258189..d2f136706f6c 100644
> --- a/drivers/md/md.c
> +++ b/drivers/md/md.c
> @@ -1888,7 +1888,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
> if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
sb-> major_version != cpu_to_le32(1) ||
> - le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
> + le32_to_cpu(sb->max_dev) > MAX_RAID_DISKS ||
> le64_to_cpu(sb->super_offset) != rdev->sb_start ||
> (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
> return -EINVAL;
> @@ -2065,7 +2065,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
mddev-> resync_offset = le64_to_cpu(sb->resync_offset);
> memcpy(mddev->uuid, sb->set_uuid, 16);
> - mddev->max_disks = (4096-256)/2;
> + mddev->max_disks = MAX_RAID_DISKS;
> if (!mddev->logical_block_size)
mddev-> logical_block_size = le32_to_cpu(sb->logical_block_size);
> diff --git a/drivers/md/md.h b/drivers/md/md.h
> index a083f37374d0..6a4af4a1959c 100644
> --- a/drivers/md/md.h
> +++ b/drivers/md/md.h
> @@ -22,6 +22,7 @@
> #include <trace/events/block.h>
> #define MaxSector (~(sector_t)0)
> +#define MAX_RAID_DISKS ((4096-256)/2)
Looks fine to me, except there's no explanation for the magic numbers
here. Sure, it's 1920 devices max, but WHY? Other than that nit,
looks fine.
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 4/5] md: introduce MAX_RAID_DISKS macro to replace magic number
2025-12-31 18:00 ` John Stoffel
@ 2026-01-04 2:06 ` Zheng Qixing
0 siblings, 0 replies; 21+ messages in thread
From: Zheng Qixing @ 2026-01-04 2:06 UTC (permalink / raw)
To: John Stoffel
Cc: song, yukuai, linux-raid, linux-kernel, yi.zhang, yangerkun,
houtao1, linan122, zhengqixing
在 2026/1/1 2:00, John Stoffel 写道:
>> #define MaxSector (~(sector_t)0)
>> +#define MAX_RAID_DISKS ((4096-256)/2)
> Looks fine to me, except there's no explanation for the magic numbers
> here. Sure, it's 1916 devices max, but WHY? Other than that nit,
> looks fine.
>
In include/uapi/linux/raid/md_p.h :
/*
* The version-1 superblock :
* All numeric fields are little-endian.
*
* total size: 256 bytes plus 2 per device.
* 1K allows 384 devices.
*/
struct mdp_superblock_1 {
1.x superblock:
Per-device state is stored as a __u16 dev_roles[] array (2 bytes per
device) plus a fixed 256-byte header, still within a 4 KiB superblock.
Therefore the theoretical maximum is (4096 - 256) / 2 = 1920 entries.
0.90 superblock (27 devices):
The superblock is fixed at 4 KiB (1024 32-bit words). It must store
several fixed sections (generic + personality) and also reserves space
for one “this_disk” descriptor in addition to the per-member disks[]
table. Each member descriptor consumes 32 words = 128 bytes. After
accounting for the fixed sections and the extra descriptor, the
remaining space fits exactly 27 member descriptors.
Best regards,
Qixing
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks
2025-12-31 11:11 ` [RFC PATCH 0/5] md/raid1: introduce a new sync " Roman Mamedov
@ 2026-01-06 2:44 ` Zheng Qixing
2026-01-06 13:50 ` Roman Mamedov
2026-01-06 15:36 ` Pascal Hambourg
0 siblings, 2 replies; 21+ messages in thread
From: Zheng Qixing @ 2026-01-06 2:44 UTC (permalink / raw)
To: Roman Mamedov
Cc: song, yukuai, linux-raid, linux-kernel, yi.zhang, yangerkun,
houtao1, linan122, zhengqixing
Hi,
在 2025/12/31 19:11, Roman Mamedov 写道:
> On Wed, 31 Dec 2025 15:09:47 +0800
> Zheng Qixing <zhengqixing@huaweicloud.com> wrote:
>
>> From: Zheng Qixing <zhengqixing@huawei.com>
>>
>> In RAID1, some sectors may be marked as bad blocks due to I/O errors.
>> In certain scenarios, these bad blocks might not be permanent, and
>> issuing I/Os again could succeed.
>>
>> To address this situation, a new sync action ('rectify') introduced
>> into RAID1 , allowing users to actively trigger the repair of existing
>> bad blocks and clear it in sys bad_blocks.
>>
>> When echo rectify into /sys/block/md*/md/sync_action, a healthy disk is
>> selected from the array to read data and then writes it to the disk where
>> the bad block is located. If the write request succeeds, the bad block
>> record can be cleared.
> Could you also check here that it reads back successfully, and only then clear?
>
> Otherwise there are cases when the block won't read even after rewriting it.
Thanks for your suggestions.
I'm a bit worried that reading the data back again before clearing the
bad blocks might affect the performance of the bad block repair process.
> Side note, on some hardware it might be necessary to rewrite a larger area
> around the problematic block, to finally trigger a remap. Not 512B, but at
> least the native sector size, which is often 4K.
Are you referring to the case where we have logical 512B sectors but
physical 4K sectors?
I'm not entirely clear on one aspect:
Can a physical 4K block have partial recovery (e.g., one 512B sector
succeeds while the other 7 fail)?
Thanks,
Qixing
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 1/5] md: add helpers for requested sync action
2025-12-31 7:09 ` [RFC PATCH 1/5] md: add helpers for requested sync action Zheng Qixing
@ 2026-01-06 12:59 ` Li Nan
2026-01-08 3:31 ` Zheng Qixing
0 siblings, 1 reply; 21+ messages in thread
From: Li Nan @ 2026-01-06 12:59 UTC (permalink / raw)
To: Zheng Qixing, song, yukuai
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1,
zhengqixing, linan122
在 2025/12/31 15:09, Zheng Qixing 写道:
> From: Zheng Qixing <zhengqixing@huawei.com>
>
> Add helpers for handling requested sync action.
>
> In handle_requested_sync_action(), add mutual exclusivity checks between
> check/repair operations. This prevents the scenario where one operation
> is requested, but before MD_RECOVERY_RUNNING is set, another operation is
> requested, resulting in neither an EBUSY return nor proper execution of
> the second operation.
>
> Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
> ---
> drivers/md/md.c | 87 +++++++++++++++++++++++++++++++++++++------------
> 1 file changed, 66 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/md/md.c b/drivers/md/md.c
> index 5df2220b1bd1..ccaa2e6fe079 100644
> --- a/drivers/md/md.c
> +++ b/drivers/md/md.c
> @@ -665,6 +665,59 @@ void mddev_put(struct mddev *mddev)
> spin_unlock(&all_mddevs_lock);
> }
>
> +static int __handle_requested_sync_action(struct mddev *mddev,
> + enum sync_action action)
> +{
> + switch (action) {
> + case ACTION_CHECK:
> + set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
> + fallthrough;
> + case ACTION_REPAIR:
> + set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
> + set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
> + return 0;
> + default:
> + return -EINVAL;
> + } > +}
> +
> +static int handle_requested_sync_action(struct mddev *mddev,
> + enum sync_action action)
> +{
> + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
> + return -EBUSY;
This return changes the original logic; split the factoring-out and the fix into two patches.
> + return __handle_requested_sync_action(mddev, action);
> +}
> +
__handle_requested_sync_action does not need to be split.
> +static enum sync_action __get_recovery_sync_action(struct mddev *mddev)
> +{
> + if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
> + return ACTION_CHECK;
> + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
> + return ACTION_REPAIR;
> + return ACTION_RESYNC;
> +}
> +
> +static enum sync_action get_recovery_sync_action(struct mddev *mddev)
> +{
> + return __get_recovery_sync_action(mddev);
> +}
> +
__get_recovery_sync_action also does not need to be split.
> +static void init_recovery_position(struct mddev *mddev)
> +{
> + mddev->resync_min = 0;
> +}
> +
> +static void set_requested_position(struct mddev *mddev, sector_t value)
> +{
> + mddev->resync_min = value;
> +}
> +
> +static sector_t get_requested_position(struct mddev *mddev)
> +{
> + return mddev->resync_min;
> +}
> +
There is no need to factor the operations of resync_min;
'rectify_min' that follows can re-use 'resync_min' directly.
> static void md_safemode_timeout(struct timer_list *t);
> static void md_start_sync(struct work_struct *ws);
>
> @@ -781,7 +834,7 @@ int mddev_init(struct mddev *mddev)
> mddev->reshape_position = MaxSector;
> mddev->reshape_backwards = 0;
> mddev->last_sync_action = ACTION_IDLE;
> - mddev->resync_min = 0;
> + init_recovery_position(mddev);
> mddev->resync_max = MaxSector;
> mddev->level = LEVEL_NONE;
>
> @@ -5101,17 +5154,9 @@ enum sync_action md_sync_action(struct mddev *mddev)
> if (test_bit(MD_RECOVERY_RECOVER, &recovery))
> return ACTION_RECOVER;
>
> - if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
> - /*
> - * MD_RECOVERY_CHECK must be paired with
> - * MD_RECOVERY_REQUESTED.
> - */
> - if (test_bit(MD_RECOVERY_CHECK, &recovery))
> - return ACTION_CHECK;
> - if (test_bit(MD_RECOVERY_REQUESTED, &recovery))
> - return ACTION_REPAIR;
> - return ACTION_RESYNC;
> - }
> + /* MD_RECOVERY_CHECK must be paired with MD_RECOVERY_REQUESTED. */
> + if (test_bit(MD_RECOVERY_SYNC, &recovery))
> + return get_recovery_sync_action(mddev);
>
> /*
> * MD_RECOVERY_NEEDED or MD_RECOVERY_RUNNING is set, however, no
> @@ -5300,11 +5345,10 @@ action_store(struct mddev *mddev, const char *page, size_t len)
> set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
> break;
> case ACTION_CHECK:
> - set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
> - fallthrough;
> case ACTION_REPAIR:
> - set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
> - set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
> + ret = handle_requested_sync_action(mddev, action);
> + if (ret)
> + goto out;
> fallthrough;
> case ACTION_RESYNC:
> case ACTION_IDLE:
> @@ -6783,7 +6827,7 @@ static void md_clean(struct mddev *mddev)
> mddev->dev_sectors = 0;
> mddev->raid_disks = 0;
> mddev->resync_offset = 0;
> - mddev->resync_min = 0;
> + init_recovery_position(mddev);
> mddev->resync_max = MaxSector;
> mddev->reshape_position = MaxSector;
> /* we still need mddev->external in export_rdev, do not clear it yet */
> @@ -9370,7 +9414,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
> switch (action) {
> case ACTION_CHECK:
> case ACTION_REPAIR:
> - return mddev->resync_min;
> + return get_requested_position(mddev);
> case ACTION_RESYNC:
> if (!mddev->bitmap)
> return mddev->resync_offset;
> @@ -9795,10 +9839,11 @@ void md_do_sync(struct md_thread *thread)
> if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
> /* We completed so min/max setting can be forgotten if used. */
> if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
> - mddev->resync_min = 0;
> + set_requested_position(mddev, 0);
> mddev->resync_max = MaxSector;
> - } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
> - mddev->resync_min = mddev->curr_resync_completed;
> + } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
> + set_requested_position(mddev, mddev->curr_resync_completed);
> + }
> set_bit(MD_RECOVERY_DONE, &mddev->recovery);
> mddev->curr_resync = MD_RESYNC_NONE;
> spin_unlock(&mddev->lock);
--
Thanks,
Nan
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 2/5] md: clear stale sync flags when frozen before sync starts
2025-12-31 7:09 ` [RFC PATCH 2/5] md: clear stale sync flags when frozen before sync starts Zheng Qixing
@ 2026-01-06 13:07 ` Li Nan
0 siblings, 0 replies; 21+ messages in thread
From: Li Nan @ 2026-01-06 13:07 UTC (permalink / raw)
To: Zheng Qixing, song, yukuai
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1,
zhengqixing, linan122
在 2025/12/31 15:09, Zheng Qixing 写道:
> From: Zheng Qixing <zhengqixing@huawei.com>
>
> In md_check_recovery(), add clearing of all sync flags when sync is not
> running. This fixes the issue where a sync operation is requested, then
> 'frozen' is executed before MD_RECOVERY_RUNNING is set, leaving stale
> operation flags that cause subsequent operations to fail with EBUSY.
>
> Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
> ---
> drivers/md/md.c | 3 +++
> 1 file changed, 3 insertions(+)
>
> diff --git a/drivers/md/md.c b/drivers/md/md.c
> index ccaa2e6fe079..52e09a9a9288 100644
> --- a/drivers/md/md.c
> +++ b/drivers/md/md.c
> @@ -10336,6 +10336,9 @@ void md_check_recovery(struct mddev *mddev)
> queue_work(md_misc_wq, &mddev->sync_work);
> } else {
> clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
> + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
> + clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
> + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
> wake_up(&resync_wait);
> }
>
Merge into one with the previous fix patch. Do not introduce an issue
and fix it later.
--
Thanks,
Nan
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 3/5] md: simplify sync action print in status_resync
2025-12-31 7:09 ` [RFC PATCH 3/5] md: simplify sync action print in status_resync Zheng Qixing
@ 2026-01-06 13:24 ` Li Nan
0 siblings, 0 replies; 21+ messages in thread
From: Li Nan @ 2026-01-06 13:24 UTC (permalink / raw)
To: Zheng Qixing, song, yukuai
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1,
zhengqixing, linan122
在 2025/12/31 15:09, Zheng Qixing 写道:
> From: Zheng Qixing <zhengqixing@huawei.com>
>
> No functional change, just code cleanup to make it easier to add new
> sync actions later.
>
> Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
> ---
> drivers/md/md.c | 16 +++++++++-------
> 1 file changed, 9 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/md/md.c b/drivers/md/md.c
> index 52e09a9a9288..9eeab5258189 100644
> --- a/drivers/md/md.c
> +++ b/drivers/md/md.c
> @@ -8684,6 +8684,8 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
> sector_t rt, curr_mark_cnt, resync_mark_cnt;
> int scale, recovery_active;
> unsigned int per_milli;
> + enum sync_action action;
> + const char *sync_action_name;
>
> if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
> test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
> @@ -8765,13 +8767,13 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
> seq_printf(seq, ".");
> seq_printf(seq, "] ");
> }
> - seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
> - (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
> - "reshape" :
> - (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
> - "check" :
> - (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
> - "resync" : "recovery"))),
> +
> + action = md_sync_action(mddev);
> + if (action == ACTION_RECOVER)
> + sync_action_name = "recovery";
Why not use md_sync_action_name for ACTION_RECOVER?
> + else
> + sync_action_name = md_sync_action_name(action);
> + seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", sync_action_name,
> per_milli/10, per_milli % 10,
> (unsigned long long) resync/2,
> (unsigned long long) max_sectors/2);
--
Thanks,
Nan
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 5/5] md/raid1: introduce rectify action to repair badblocks
2025-12-31 7:09 ` [RFC PATCH 5/5] md/raid1: introduce rectify action to repair badblocks Zheng Qixing
@ 2026-01-06 13:43 ` Li Nan
2026-01-19 2:51 ` Zheng Qixing
2026-01-14 3:11 ` Li Nan
1 sibling, 1 reply; 21+ messages in thread
From: Li Nan @ 2026-01-06 13:43 UTC (permalink / raw)
To: Zheng Qixing, song, yukuai
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1,
zhengqixing, linan122
在 2025/12/31 15:09, Zheng Qixing 写道:
> From: Zheng Qixing <zhengqixing@huawei.com>
>
> Add support for repairing known badblocks in RAID1. When disks
> have known badblocks (shown in sysfs bad_blocks), data can be
> read from other healthy disks in the array and written to repair
> the badblock areas and clear it in bad_blocks.
>
> echo rectify > sync_action can trigger this action.
>
> Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
> ---
> drivers/md/md.c | 80 ++++++++++--
> drivers/md/md.h | 16 +++
> drivers/md/raid1.c | 308 ++++++++++++++++++++++++++++++++++++++++++++-
> drivers/md/raid1.h | 1 +
> 4 files changed, 394 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/md/md.c b/drivers/md/md.c
> index d2f136706f6c..f5844cfb78fc 100644
> --- a/drivers/md/md.c
> +++ b/drivers/md/md.c
> @@ -74,6 +74,7 @@ static const char *action_name[NR_SYNC_ACTIONS] = {
> [ACTION_RECOVER] = "recover",
> [ACTION_CHECK] = "check",
> [ACTION_REPAIR] = "repair",
> + [ACTION_RECTIFY] = "rectify",
> [ACTION_RESHAPE] = "reshape",
> [ACTION_FROZEN] = "frozen",
> [ACTION_IDLE] = "idle",
> @@ -665,6 +666,29 @@ void mddev_put(struct mddev *mddev)
> spin_unlock(&all_mddevs_lock);
> }
>
> +static int raid1_badblocks_precheck(struct mddev *mddev)
This is a modification in md.c, do not use raid1_* for function names.
> +{
> + struct md_rdev *rdev;
> + int valid_disks = 0;
> + int ret = -EINVAL;
> +
> + if (mddev->level != 1) {
> + pr_err("md/raid1:%s requires raid1 array\n", mdname(mddev));
> + return -EINVAL;
> + }
Return the error at sysfs write time, and add a comment explaining why other
levels are not supported.
> +
> + rdev_for_each(rdev, mddev) {
> + if (rdev->raid_disk < 0 ||
> + test_bit(Faulty, &rdev->flags))
> + continue;
> + valid_disks++;
> + }
> + if (valid_disks >= 2)
> + ret = 0;
> +
> + return ret;
> +}
> +
> static int __handle_requested_sync_action(struct mddev *mddev,
> enum sync_action action)
> {
> @@ -684,9 +708,23 @@ static int __handle_requested_sync_action(struct mddev *mddev,
> static int handle_requested_sync_action(struct mddev *mddev,
> enum sync_action action)
> {
> + int ret;
> +
> if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
> return -EBUSY;
> - return __handle_requested_sync_action(mddev, action);
> +
> + switch (action) {
> + case ACTION_RECTIFY:
> + ret = raid1_badblocks_precheck(mddev);
> + if (ret)
> + return ret;
> + set_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery);
> + set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
> + set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
> + return 0;
> + default:
> + return __handle_requested_sync_action(mddev, action);
> + }
> }
>
> static enum sync_action __get_recovery_sync_action(struct mddev *mddev)
> @@ -700,21 +738,34 @@ static enum sync_action __get_recovery_sync_action(struct mddev *mddev)
>
> static enum sync_action get_recovery_sync_action(struct mddev *mddev)
> {
> + if (test_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery))
> + return ACTION_RECTIFY;
> return __get_recovery_sync_action(mddev);
> }
>
> static void init_recovery_position(struct mddev *mddev)
> {
> mddev->resync_min = 0;
> + mddev->rectify_min = 0;
> +}
As mentioned in patch 1, can we directly reuse resync_min?
> +
> +static inline void clear_badblock_md_flags(struct mddev *mddev)
> +{
> + clear_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery);
> }
No need to factor.
>
> static void set_requested_position(struct mddev *mddev, sector_t value)
> {
> - mddev->resync_min = value;
> + if (test_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery))
> + mddev->rectify_min = value;
> + else
> + mddev->resync_min = value;
> }
>
> static sector_t get_requested_position(struct mddev *mddev)
> {
> + if (test_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery))
> + return mddev->rectify_min;
> return mddev->resync_min;
> }
>
> @@ -5154,7 +5205,10 @@ enum sync_action md_sync_action(struct mddev *mddev)
> if (test_bit(MD_RECOVERY_RECOVER, &recovery))
> return ACTION_RECOVER;
>
> - /* MD_RECOVERY_CHECK must be paired with MD_RECOVERY_REQUESTED. */
> + /*
> + * MD_RECOVERY_CHECK/MD_RECOVERY_BADBLOCKS_RECTIFY must be
Add spaces on both sides of '/'.
> + * paired with MD_RECOVERY_REQUESTED.
> + */
> if (test_bit(MD_RECOVERY_SYNC, &recovery))
> return get_recovery_sync_action(mddev);
>
> @@ -5319,6 +5373,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
> break;
> case ACTION_RESHAPE:
> case ACTION_RECOVER:
> + case ACTION_RECTIFY:
> case ACTION_CHECK:
> case ACTION_REPAIR:
> case ACTION_RESYNC:
> @@ -5344,6 +5399,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
> clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
> set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
> break;
> + case ACTION_RECTIFY:
> case ACTION_CHECK:
> case ACTION_REPAIR:
> ret = handle_requested_sync_action(mddev, action);
> @@ -9362,6 +9418,7 @@ static sector_t md_sync_max_sectors(struct mddev *mddev,
> {
> switch (action) {
> case ACTION_RESYNC:
> + case ACTION_RECTIFY:
> case ACTION_CHECK:
> case ACTION_REPAIR:
> atomic64_set(&mddev->resync_mismatches, 0);
> @@ -9414,6 +9471,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
> struct md_rdev *rdev;
>
> switch (action) {
> + case ACTION_RECTIFY:
> case ACTION_CHECK:
> case ACTION_REPAIR:
> return get_requested_position(mddev);
> @@ -10039,6 +10097,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
> clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
> clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
> clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
> + clear_badblock_md_flags(mddev);
>
> /* Start new recovery. */
> set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
> @@ -10096,10 +10155,14 @@ static void md_start_sync(struct work_struct *ws)
> if (spares && md_bitmap_enabled(mddev, true))
> mddev->bitmap_ops->write_all(mddev);
>
> - name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
> - "reshape" : "resync";
> - rcu_assign_pointer(mddev->sync_thread,
> - md_register_thread(md_do_sync, mddev, name));
> + if (!is_badblocks_recovery_requested(mddev) ||
> + !raid1_badblocks_precheck(mddev)) {
> + name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
> + "reshape" : "resync";
> + rcu_assign_pointer(mddev->sync_thread,
> + md_register_thread(md_do_sync, mddev, name));
> + }
> +
> if (!mddev->sync_thread) {
> pr_warn("%s: could not start resync thread...\n",
> mdname(mddev));
> @@ -10127,6 +10190,7 @@ static void md_start_sync(struct work_struct *ws)
> clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
> clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
> clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
> + clear_badblock_md_flags(mddev);
> mddev_unlock(mddev);
> /*
> * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
> @@ -10341,6 +10405,7 @@ void md_check_recovery(struct mddev *mddev)
> clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
> clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
> clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
> + clear_badblock_md_flags(mddev);
> wake_up(&resync_wait);
> }
>
> @@ -10391,6 +10456,7 @@ void md_reap_sync_thread(struct mddev *mddev)
> clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
> clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
> clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
> + clear_badblock_md_flags(mddev);
> clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
> /*
> * We call mddev->cluster_ops->update_size here because sync_size could
> diff --git a/drivers/md/md.h b/drivers/md/md.h
> index 6a4af4a1959c..58f320b19bba 100644
> --- a/drivers/md/md.h
> +++ b/drivers/md/md.h
> @@ -98,6 +98,13 @@ enum sync_action {
> * are inconsistent data,
> */
> ACTION_REPAIR,
> + /*
> + * Represent by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED |
> + * MD_RECOVERY_BADBLOCKS_RECTIFY, start when user echo "rectify"
> + * to sysfs api sync_action, used to repair the badblocks acked
> + * in bad table;
> + */
> + ACTION_RECTIFY,
> /*
> * Represent by MD_RECOVERY_RESHAPE, start when new member disk is added
> * to the conf, notice that this is different from spares or
> @@ -524,6 +531,7 @@ struct mddev {
> sector_t resync_offset;
> sector_t resync_min; /* user requested sync
> * starts here */
> + sector_t rectify_min;
> sector_t resync_max; /* resync should pause
> * when it gets here */
>
> @@ -664,6 +672,8 @@ enum recovery_flags {
> MD_RESYNCING_REMOTE,
> /* raid456 lazy initial recover */
> MD_RECOVERY_LAZY_RECOVER,
> + /* try to repair acked badblocks*/
> + MD_RECOVERY_BADBLOCKS_RECTIFY,
> };
>
> enum md_ro_state {
> @@ -1016,6 +1026,12 @@ static inline void mddev_unlock_and_resume(struct mddev *mddev)
> mddev_resume(mddev);
> }
>
> +static inline bool is_badblocks_recovery_requested(struct mddev *mddev)
> +{
> + return test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
> + test_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery);
> +}
> +
> struct mdu_array_info_s;
> struct mdu_disk_info_s;
>
> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
> index 00120c86c443..f304161bc0ce 100644
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -176,7 +176,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
> * If this is a user-requested check/repair, allocate
> * RESYNC_PAGES for each bio.
> */
> - if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery))
> + if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery) &&
> + !is_badblocks_recovery_requested(conf->mddev))
> need_pages = conf->raid_disks * 2;
> else
> need_pages = 1;
> @@ -2380,6 +2381,301 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
> put_sync_write_buf(r1_bio);
> }
>
> +static void end_rectify_read(struct bio *bio)
> +{
> + struct r1bio *r1_bio = get_resync_r1bio(bio);
> + struct r1conf *conf = r1_bio->mddev->private;
> + struct md_rdev *rdev;
> + struct bio *next_bio;
> + bool all_fail = true;
> + int i;
> +
> + update_head_pos(r1_bio->read_disk, r1_bio);
> +
> + if (!bio->bi_status) {
> + set_bit(R1BIO_Uptodate, &r1_bio->state);
> + goto out;
> + }
> +
> + for (i = r1_bio->read_disk + 1; i < conf->raid_disks; i++) {
> + rdev = conf->mirrors[i].rdev;
> + if (!rdev || test_bit(Faulty, &rdev->flags))
> + continue;
> +
> + next_bio = r1_bio->bios[i];
> + if (next_bio->bi_end_io == end_rectify_read) {
> + next_bio->bi_opf &= ~MD_FAILFAST;
> + r1_bio->read_disk = i;
> + all_fail = false;
> + break;
> + }
> + }
> +
> + if (unlikely(all_fail)) {
> + md_done_sync(r1_bio->mddev, r1_bio->sectors);
> + md_sync_error(r1_bio->mddev);
> + put_buf(r1_bio);
> + return;
> + }
> +out:
> + reschedule_retry(r1_bio);
> +}
> +
> +static void end_rectify_write(struct bio *bio)
> +{
> + struct r1bio *r1_bio = get_resync_r1bio(bio);
> +
> + if (atomic_dec_and_test(&r1_bio->remaining)) {
> + /*
> + * Rectify only attempts to clear acked bad
> + * blocks, and it does not set bad blocks in
> + * cases of R1BIO_WriteError.
> + * Here we reuse R1BIO_MadeGood flag, which
> + * does not guarantee that all write I/Os
> + * actually succeeded.
> + */
> + set_bit(R1BIO_MadeGood, &r1_bio->state);
> + reschedule_retry(r1_bio);
> + }
> +}
> +
> +static void submit_rectify_read(struct r1bio *r1_bio)
> +{
> + struct bio *bio;
> +
> + bio = r1_bio->bios[r1_bio->read_disk];
> + bio->bi_opf &= ~MD_FAILFAST;
> + bio->bi_status = 0;
> + submit_bio_noacct(bio);
> +}
> +
> +static void rectify_request_write(struct mddev *mddev, struct r1bio *r1_bio)
> +{
> + struct r1conf *conf = mddev->private;
> + struct bio *wbio = NULL;
> + int wcnt = 0;
> + int i;
> +
> + if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
> + submit_rectify_read(r1_bio);
> + return;
> + }
> +
> + atomic_set(&r1_bio->remaining, 0);
> + for (i = 0; i < conf->raid_disks; i++) {
> + wbio = r1_bio->bios[i];
> + if (wbio->bi_end_io == end_rectify_write) {
> + atomic_inc(&r1_bio->remaining);
> + wcnt++;
> + submit_bio_noacct(wbio);
> + }
> + }
> +
> + if (unlikely(!wcnt)) {
> + md_done_sync(r1_bio->mddev, r1_bio->sectors);
> + put_buf(r1_bio);
> + }
> +}
> +
> +static void handle_rectify_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
> +{
> + struct md_rdev *rdev;
> + struct bio *bio;
> + int i;
> +
> + for (i = 0; i < conf->raid_disks; i++) {
> + rdev = conf->mirrors[i].rdev;
> + bio = r1_bio->bios[i];
> + if (bio->bi_end_io == NULL)
> + continue;
> +
> + if (!bio->bi_status && bio->bi_end_io == end_rectify_write &&
> + test_bit(R1BIO_MadeGood, &r1_bio->state)) {
> + rdev_clear_badblocks(rdev, r1_bio->sector,
> + r1_bio->sectors, 0);
> + }
> + }
> +
> + md_done_sync(r1_bio->mddev, r1_bio->sectors);
> + put_buf(r1_bio);
> +}
> +
> +static void handle_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
> +{
> + if (test_bit(R1BIO_BadBlocksRectify, &r1_bio->state))
> + rectify_request_write(mddev, r1_bio);
> + else
> + sync_request_write(mddev, r1_bio);
> +}
> +
> +static void __handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio);
> +static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
> +{
> + if (test_bit(R1BIO_BadBlocksRectify, &r1_bio->state))
> + handle_rectify_write_finished(conf, r1_bio);
> + else
> + __handle_sync_write_finished(conf, r1_bio);
> +}
> +
> +static sector_t get_badblocks_sync_sectors(struct mddev *mddev, sector_t sector_nr,
> + int *skipped, unsigned long *bad_disks)
> +{
> + struct r1conf *conf = mddev->private;
> + sector_t nr_sectors = mddev->dev_sectors - sector_nr;
> + bool all_faulty = true;
> + struct md_rdev *rdev;
> + bool good = false;
> + int i;
> +
> + *skipped = 0;
> + for (i = 0; i < conf->raid_disks; i++) {
> + sector_t first_bad;
> + sector_t bad_sectors;
> +
> + rdev = conf->mirrors[i].rdev;
> + if (!rdev || test_bit(Faulty, &rdev->flags))
> + continue;
> +
> + all_faulty = false;
> + if (is_badblock(rdev, sector_nr, nr_sectors, &first_bad, &bad_sectors)) {
> + if (first_bad <= sector_nr) {
> + set_bit(i, bad_disks);
> + nr_sectors = min(nr_sectors, first_bad + bad_sectors - sector_nr);
> + } else {
> + good = true;
> + nr_sectors = min(nr_sectors, first_bad - sector_nr);
> + }
> + } else {
> + good = true;
> + }
> + }
> +
> + if (all_faulty) {
> + *skipped = 1;
> + return 0;
> + }
> +
> + if (!good || !bitmap_weight(bad_disks, conf->raid_disks))
> + *skipped = 1;
> +
> + /* make sure nr_sectors won't go across barrier unit boundary */
> + return align_to_barrier_unit_end(sector_nr, nr_sectors);
> +}
> +
> +static sector_t get_next_sync_sector(struct mddev *mddev, sector_t sector_nr,
> + int *skipped, unsigned long *bad_disks)
> +{
> + sector_t nr_sectors;
> +
> + nr_sectors = get_badblocks_sync_sectors(mddev, sector_nr,
> + skipped, bad_disks);
> + if (!(*skipped) && nr_sectors > RESYNC_PAGES * (PAGE_SIZE >> 9))
> + nr_sectors = RESYNC_PAGES * (PAGE_SIZE >> 9);
> + return nr_sectors;
> +}
> +
> +static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf);
> +static struct r1bio *init_sync_badblocks_r1bio(struct mddev *mddev,
> + sector_t sector_nr,
> + sector_t nr_sectors,
> + unsigned long *bad_disks)
> +{
> + struct r1conf *conf = mddev->private;
> + struct r1bio *r1_bio;
> + struct md_rdev *rdev;
> + int page_idx = 0;
> + struct bio *bio;
> + int i;
> +
> + r1_bio = raid1_alloc_init_r1buf(conf);
> + r1_bio->mddev = mddev;
> + r1_bio->sector = sector_nr;
> + r1_bio->sectors = nr_sectors;
> + r1_bio->state = 0;
> + r1_bio->read_disk = -1;
> + set_bit(R1BIO_IsSync, &r1_bio->state);
> + set_bit(R1BIO_BadBlocksRectify, &r1_bio->state);
> +
> + for (i = 0; i < conf->raid_disks; i++) {
> + rdev = conf->mirrors[i].rdev;
> + if (!rdev || test_bit(Faulty, &rdev->flags))
> + continue;
> +
> + if (r1_bio->read_disk < 0 && !test_bit(i, bad_disks))
> + r1_bio->read_disk = i;
> +
> + bio = r1_bio->bios[i];
> + if (test_bit(i, bad_disks)) {
> + bio->bi_opf = REQ_OP_WRITE;
> + bio->bi_end_io = end_rectify_write;
> + } else {
> + bio->bi_opf = REQ_OP_READ;
> + bio->bi_end_io = end_rectify_read;
> + }
> +
> + atomic_inc(&rdev->nr_pending);
> + bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
> + bio_set_dev(bio, rdev->bdev);
> + if (test_bit(FailFast, &rdev->flags))
> + bio->bi_opf |= MD_FAILFAST;
> + }
> +
> + if (unlikely(r1_bio->read_disk < 0)) {
> + put_buf(r1_bio);
> + return NULL;
> + }
> +
> + while (nr_sectors > 0 && page_idx < RESYNC_PAGES) {
> + int len = nr_sectors << 9 < PAGE_SIZE ?
> + nr_sectors << 9 : PAGE_SIZE;
> + struct resync_pages *rp;
> +
> + for (i = 0; i < conf->raid_disks; i++) {
> + bio = r1_bio->bios[i];
> + rp = get_resync_pages(bio);
> + __bio_add_page(bio, resync_fetch_page(rp, page_idx), len, 0);
> + }
> +
> + nr_sectors -= len >> 9;
> + page_idx++;
> + }
> +
> + return r1_bio;
> +}
> +
> +static sector_t __badblocks_rectify(struct mddev *mddev, sector_t sector_nr,
> + sector_t nr_sectors,
> + unsigned long *bad_disks)
> +{
> + struct r1bio *r1_bio;
> +
> + r1_bio = init_sync_badblocks_r1bio(mddev, sector_nr,
> + nr_sectors, bad_disks);
> + if (!r1_bio)
> + return 0;
> +
> + submit_rectify_read(r1_bio);
> + return nr_sectors;
> +}
No need to factor this out into a separate helper.
> +
> +static sector_t do_sync_badblocks_rectify(struct mddev *mddev,
> + sector_t sector_nr, int *skipped)
> +{
> + DECLARE_BITMAP(bad_disks, MAX_RAID_DISKS);
> + struct r1conf *conf = mddev->private;
> + sector_t nr_sectors;
> +
> + bitmap_zero(bad_disks, MAX_RAID_DISKS);
> + nr_sectors = get_next_sync_sector(mddev, sector_nr, skipped, bad_disks);
> +
> + if (*skipped) {
> + lower_barrier(conf, sector_nr);
> + return nr_sectors;
> + }
> +
> + return __badblocks_rectify(mddev, sector_nr, nr_sectors, bad_disks);
> +}
> +
> /*
> * This is a kernel thread which:
> *
> @@ -2554,7 +2850,7 @@ static bool narrow_write_error(struct r1bio *r1_bio, int i)
> return ok;
> }
>
> -static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
> +static void __handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
> {
> int m;
> int s = r1_bio->sectors;
> @@ -2728,7 +3024,7 @@ static void raid1d(struct md_thread *thread)
> test_bit(R1BIO_WriteError, &r1_bio->state))
> handle_sync_write_finished(conf, r1_bio);
> else
> - sync_request_write(mddev, r1_bio);
> + handle_sync_write(mddev, r1_bio);
> } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
> test_bit(R1BIO_WriteError, &r1_bio->state))
> handle_write_finished(conf, r1_bio);
> @@ -2837,7 +3133,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
> /* before building a request, check if we can skip these blocks..
> * This call the bitmap_start_sync doesn't actually record anything
> */
> - if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
> + if (!is_badblocks_recovery_requested(mddev) &&
> + !md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
> !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
> /* We can skip this block, and probably several more */
> *skipped = 1;
> @@ -2863,6 +3160,9 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
> if (raise_barrier(conf, sector_nr))
> return 0;
>
> + if (is_badblocks_recovery_requested(mddev))
> + return do_sync_badblocks_rectify(mddev, sector_nr, skipped);
> +
> r1_bio = raid1_alloc_init_r1buf(conf);
>
> /*
> diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
> index c98d43a7ae99..6ca8bf808d69 100644
> --- a/drivers/md/raid1.h
> +++ b/drivers/md/raid1.h
> @@ -184,6 +184,7 @@ enum r1bio_state {
> R1BIO_MadeGood,
> R1BIO_WriteError,
> R1BIO_FailFast,
> + R1BIO_BadBlocksRectify,
> };
>
> static inline int sector_to_idx(sector_t sector)
--
Thanks,
Nan
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks
2026-01-06 2:44 ` Zheng Qixing
@ 2026-01-06 13:50 ` Roman Mamedov
2026-01-06 15:36 ` Pascal Hambourg
1 sibling, 0 replies; 21+ messages in thread
From: Roman Mamedov @ 2026-01-06 13:50 UTC (permalink / raw)
To: Zheng Qixing
Cc: song, yukuai, linux-raid, linux-kernel, yi.zhang, yangerkun,
houtao1, linan122, zhengqixing
On Tue, 6 Jan 2026 10:44:38 +0800
Zheng Qixing <zhengqixing@huaweicloud.com> wrote:
> Are you referring to the case where we have logical 512B sectors but
> physical 4K sectors?
At least that, yes. Such rewriting of bad blocks should happen at least at the
physical sector granularity.
But from my limited experience it feels like the badblock recovery algorithm
in hard drives, in addition to being opaque and proprietary, also highly
indeterministic and possibly buggy. In one case it would take REPEATEDLY
overwriting a full megabyte around a bad block to finally make the drive remap
it. (Maybe less than a megabyte would do, but overwriting only 4K - didn't).
Of course I understand such endeavors are outside of scope for mdraid, hence
it was just a side note.
--
With respect,
Roman
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks
2026-01-06 2:44 ` Zheng Qixing
2026-01-06 13:50 ` Roman Mamedov
@ 2026-01-06 15:36 ` Pascal Hambourg
2026-01-08 1:32 ` Zheng Qixing
1 sibling, 1 reply; 21+ messages in thread
From: Pascal Hambourg @ 2026-01-06 15:36 UTC (permalink / raw)
To: Zheng Qixing, Roman Mamedov
Cc: song, yukuai, linux-raid, linux-kernel, yi.zhang, yangerkun,
houtao1, linan122, zhengqixing
On 06/01/2026 at 03:44, Zheng Qixing wrote:
> 在 2025/12/31 19:11, Roman Mamedov 写道:
>> On Wed, 31 Dec 2025 15:09:47 +0800
>>
>> Could you also check here that it reads back successfully, and only
>> then clear?
>>
>> Otherwise there are cases when the block won't read even after
>> rewriting it.
I confirm. The rewrite is reported successful but SMART reallocation
attributes did not change and a further read still fails.
> I'm a bit worried that reading the data again before clearing the bad
> blocks might affect the performance of the bad block repair process.
Isn't it more worrying to clear bad blocks while they may still be bad ?
Bad blocks should be rare anyway, so performance impact should be low.
>> Side note, on some hardware it might be necessary to rewrite a larger area
>> around the problematic block, to finally trigger a remap. Not 512B, but at
>> least the native sector size, which is often 4K.
>
> Are you referring to the case where we have logical 512B sectors but
> physical 4K sectors?
Yes. Writing a single logical sector implies a read-modify-write of the
whole underlying physical sector and will not complete if the read fails.
> Can a physical 4K block have partial recovery (e.g., one 512B sector
> succeeds while the other 7 fail)?
Not in my experience. There seems to be a single ECC for the whole
physical sector.
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks
2026-01-06 15:36 ` Pascal Hambourg
@ 2026-01-08 1:32 ` Zheng Qixing
0 siblings, 0 replies; 21+ messages in thread
From: Zheng Qixing @ 2026-01-08 1:32 UTC (permalink / raw)
To: Pascal Hambourg, Roman Mamedov
Cc: song, yukuai, linux-raid, linux-kernel, yi.zhang, yangerkun,
houtao1, linan122, zhengqixing
在 2026/1/6 23:36, Pascal Hambourg 写道:
> On 06/01/2026 at 03:44, Zheng Qixing wrote:
>> 在 2025/12/31 19:11, Roman Mamedov 写道:
>>> On Wed, 31 Dec 2025 15:09:47 +0800
>>>
>>> Could you also check here that it reads back successfully, and only
>>> then clear?
>>>
>>> Otherwise there are cases when the block won't read even after
>>> rewriting it.
>
> I confirm. The rewrite is reported successful but SMART reallocation
> attributes did not change and a further read still fails.
>
>> I'm a bit worried that reading the data again before clearing the bad
>> blocks might affect the performance of the bad block repair process.
>
> Isn't it more worrying to clear bad blocks while they may still be bad ?
> Bad blocks should be rare anyway, so performance impact should be low.
>
>>> Side note, on some hardware it might be necessary to rewrite a
>>> larger area
>>> around the problematic block, to finally trigger a remap. Not 512B,
>>> but at
>>> least the native sector size, which is often 4K.
>>
>> Are you referring to the case where we have logical 512B sectors but
>> physical 4K sectors?
>
> Yes. Writing a single logical sector implies a read-modify-write of
> the whole underlying physical sector and will not complete if the read
> fails.
That makes sense. I will change it in the next version.
>
>> Can a physical 4K block have partial recovery (e.g., one 512B sector
>> succeeds while the other 7 fail)?
>
> Not in my experience. There seems to be a single ECC for the whole
> physical sector.
I will try to test with disks that have lbs=512 and pbs=4096.
If 512B IOs can be successfully issued, then the bad block repair logic
does need to
consider the minimum repair length and alignment logic.
Thanks,
Qixing
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 1/5] md: add helpers for requested sync action
2026-01-06 12:59 ` Li Nan
@ 2026-01-08 3:31 ` Zheng Qixing
0 siblings, 0 replies; 21+ messages in thread
From: Zheng Qixing @ 2026-01-08 3:31 UTC (permalink / raw)
To: Li Nan
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1, linan122,
Song Liu, yukuai, zhengqixing
在 2026/1/6 20:59, Li Nan 写道:
>>
>> +
>> +static int handle_requested_sync_action(struct mddev *mddev,
>> + enum sync_action action)
>> +{
>> + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
>> + return -EBUSY;
>
> This early return changes the original logic; please split the
> factoring-out and the behavioral fix into two separate patches.
Make sense to me.
>
>> + return __handle_requested_sync_action(mddev, action);
>> +}
>> +
>
> __handle_requested_sync_action does not need to be split.
>
>> +static enum sync_action __get_recovery_sync_action(struct mddev *mddev)
>> +{
>> + if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
>> + return ACTION_CHECK;
>> + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
>> + return ACTION_REPAIR;
>> + return ACTION_RESYNC;
>> +}
>> +
>> +static enum sync_action get_recovery_sync_action(struct mddev *mddev)
>> +{
>> + return __get_recovery_sync_action(mddev);
>> +}
>> +
>
> __get_recovery_sync_action also does not need to be split.
>
Okay.
>
>> +static void init_recovery_position(struct mddev *mddev)
>> +{
>> + mddev->resync_min = 0;
>> +}
>> +
>> +static void set_requested_position(struct mddev *mddev, sector_t value)
>> +{
>> + mddev->resync_min = value;
>> +}
>> +
>> +static sector_t get_requested_position(struct mddev *mddev)
>> +{
>> + return mddev->resync_min;
>> +}
>> +
>
> There is no need to factor the operations of resync_min;
> 'rectify_min' that follows can re-use 'resync_min' directly.
>
If we share resync_min with check/repair, bad blocks may be missed
during repair:
When check/repair is halfway through execution and then frozen,
followed by a rectify operation, any bad blocks that exist before
resync_min will not be repaired. This would require an additional
rectify operation.
Thanks,
Qixing
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 5/5] md/raid1: introduce rectify action to repair badblocks
2025-12-31 7:09 ` [RFC PATCH 5/5] md/raid1: introduce rectify action to repair badblocks Zheng Qixing
2026-01-06 13:43 ` Li Nan
@ 2026-01-14 3:11 ` Li Nan
2026-01-19 3:06 ` Zheng Qixing
1 sibling, 1 reply; 21+ messages in thread
From: Li Nan @ 2026-01-14 3:11 UTC (permalink / raw)
To: Zheng Qixing, song, yukuai
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1,
zhengqixing, linan122
在 2025/12/31 15:09, Zheng Qixing 写道:
> From: Zheng Qixing <zhengqixing@huawei.com>
>
> Add support for repairing known badblocks in RAID1. When disks
> have known badblocks (shown in sysfs bad_blocks), data can be
> read from other healthy disks in the array and written to repair
> the badblock areas and clear it in bad_blocks.
>
> echo rectify > sync_action can trigger this action.
>
> Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
> +static void end_rectify_read(struct bio *bio)
> +{
> + struct r1bio *r1_bio = get_resync_r1bio(bio);
> + struct r1conf *conf = r1_bio->mddev->private;
> + struct md_rdev *rdev;
> + struct bio *next_bio;
> + bool all_fail = true;
> + int i;
> +
> + update_head_pos(r1_bio->read_disk, r1_bio);
> +
> + if (!bio->bi_status) {
> + set_bit(R1BIO_Uptodate, &r1_bio->state);
> + goto out;
> + }
> +
> + for (i = r1_bio->read_disk + 1; i < conf->raid_disks; i++) {
> + rdev = conf->mirrors[i].rdev;
> + if (!rdev || test_bit(Faulty, &rdev->flags))
> + continue;
> +
> + next_bio = r1_bio->bios[i];
> + if (next_bio->bi_end_io == end_rectify_read) {
> + next_bio->bi_opf &= ~MD_FAILFAST;
Why set MD_FAILFAST only to clear it again so soon?
Also, submit_rectify_read() will clear it a second time.
> +static void rectify_request_write(struct mddev *mddev, struct r1bio *r1_bio)
> +{
> + struct r1conf *conf = mddev->private;
> + struct bio *wbio = NULL;
> + int wcnt = 0;
> + int i;
> +
> + if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
> + submit_rectify_read(r1_bio);
> + return;
> + }
> +
> + atomic_set(&r1_bio->remaining, 0);
> + for (i = 0; i < conf->raid_disks; i++) {
> + wbio = r1_bio->bios[i];
> + if (wbio->bi_end_io == end_rectify_write) {
> + atomic_inc(&r1_bio->remaining);
> + wcnt++;
> + submit_bio_noacct(wbio);
> + }
> + }
> +
> + if (unlikely(!wcnt)) {
> + md_done_sync(r1_bio->mddev, r1_bio->sectors);
> + put_buf(r1_bio);
> + }
How can 'wcnt' be 0?
> +}
> +
> +static void handle_rectify_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
> +{
> + struct md_rdev *rdev;
> + struct bio *bio;
> + int i;
> +
> + for (i = 0; i < conf->raid_disks; i++) {
> + rdev = conf->mirrors[i].rdev;
> + bio = r1_bio->bios[i];
> + if (bio->bi_end_io == NULL)
> + continue;
> +
> + if (!bio->bi_status && bio->bi_end_io == end_rectify_write &&
> + test_bit(R1BIO_MadeGood, &r1_bio->state)) {
> + rdev_clear_badblocks(rdev, r1_bio->sector,
> + r1_bio->sectors, 0);
> + }
Reuse handle_sync_write_finished() seems better.
> + }
> +
> + md_done_sync(r1_bio->mddev, r1_bio->sectors);
> + put_buf(r1_bio);
> +}
> +
--
Thanks,
Nan
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 5/5] md/raid1: introduce rectify action to repair badblocks
2026-01-06 13:43 ` Li Nan
@ 2026-01-19 2:51 ` Zheng Qixing
0 siblings, 0 replies; 21+ messages in thread
From: Zheng Qixing @ 2026-01-19 2:51 UTC (permalink / raw)
To: Li Nan
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1, linan122,
song, yukuai, Zheng Qixing
Hi,
>> @@ -700,21 +738,34 @@ static enum sync_action __get_recovery_sync_action(struct mddev *mddev)
>> static enum sync_action get_recovery_sync_action(struct mddev *mddev)
>> {
>> + if (test_bit(MD_RECOVERY_BADBLOCKS_RECTIFY, &mddev->recovery))
>> + return ACTION_RECTIFY;
>> return __get_recovery_sync_action(mddev);
>> }
>> static void init_recovery_position(struct mddev *mddev)
>> {
>> mddev->resync_min = 0;
>> + mddev->rectify_min = 0;
>> +}
>
> As mentioned in patch 1, can we directly reuse resync_min?
To keep rectify's progress separate from check and repair, it's better to use
a dedicated variable to record it.
I'll update the other suggestions in v2.
Thanks,
Qixing
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [RFC PATCH 5/5] md/raid1: introduce rectify action to repair badblocks
2026-01-14 3:11 ` Li Nan
@ 2026-01-19 3:06 ` Zheng Qixing
0 siblings, 0 replies; 21+ messages in thread
From: Zheng Qixing @ 2026-01-19 3:06 UTC (permalink / raw)
To: Li Nan
Cc: linux-raid, linux-kernel, yi.zhang, yangerkun, houtao1, linan122,
song, yukuai, Zheng Qixing
在 2026/1/14 11:11, Li Nan 写道:
>
>
> 在 2025/12/31 15:09, Zheng Qixing 写道:
>> From: Zheng Qixing <zhengqixing@huawei.com>
>>
>> Add support for repairing known badblocks in RAID1. When disks
>> have known badblocks (shown in sysfs bad_blocks), data can be
>> read from other healthy disks in the array and written to repair
>> the badblock areas and clear it in bad_blocks.
>>
>> echo rectify > sync_action can trigger this action.
>>
>> Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
>
>> +static void end_rectify_read(struct bio *bio)
>> +{
>> + struct r1bio *r1_bio = get_resync_r1bio(bio);
>> + struct r1conf *conf = r1_bio->mddev->private;
>> + struct md_rdev *rdev;
>> + struct bio *next_bio;
>> + bool all_fail = true;
>> + int i;
>> +
>> + update_head_pos(r1_bio->read_disk, r1_bio);
>> +
>> + if (!bio->bi_status) {
>> + set_bit(R1BIO_Uptodate, &r1_bio->state);
>> + goto out;
>> + }
>> +
>> + for (i = r1_bio->read_disk + 1; i < conf->raid_disks; i++) {
>> + rdev = conf->mirrors[i].rdev;
>> + if (!rdev || test_bit(Faulty, &rdev->flags))
>> + continue;
>> +
>> + next_bio = r1_bio->bios[i];
>> + if (next_bio->bi_end_io == end_rectify_read) {
>> + next_bio->bi_opf &= ~MD_FAILFAST;
>
> Why set MD_FAILFAST and clear it soon?
> And submit_rectify_read() will clear it again.
Indeed.
>
>> +static void rectify_request_write(struct mddev *mddev, struct r1bio *r1_bio)
>> +{
>> + struct r1conf *conf = mddev->private;
>> + struct bio *wbio = NULL;
>> + int wcnt = 0;
>> + int i;
>> +
>> + if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
>> + submit_rectify_read(r1_bio);
>> + return;
>> + }
>> +
>> + atomic_set(&r1_bio->remaining, 0);
>> + for (i = 0; i < conf->raid_disks; i++) {
>> + wbio = r1_bio->bios[i];
>> + if (wbio->bi_end_io == end_rectify_write) {
>> + atomic_inc(&r1_bio->remaining);
>> + wcnt++;
>> + submit_bio_noacct(wbio);
>> + }
>> + }
>> +
>> + if (unlikely(!wcnt)) {
>> + md_done_sync(r1_bio->mddev, r1_bio->sectors);
>> + put_buf(r1_bio);
>> + }
>
> How can 'wcnt' be 0?
Oh, I forgot to check the faulty state of rdev.😳
>
>> +}
>> +
>> +static void handle_rectify_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
>> +{
>> + struct md_rdev *rdev;
>> + struct bio *bio;
>> + int i;
>> +
>> + for (i = 0; i < conf->raid_disks; i++) {
>> + rdev = conf->mirrors[i].rdev;
>> + bio = r1_bio->bios[i];
>> + if (bio->bi_end_io == NULL)
>> + continue;
>> +
>> + if (!bio->bi_status && bio->bi_end_io == end_rectify_write &&
>> + test_bit(R1BIO_MadeGood, &r1_bio->state)) {
>> + rdev_clear_badblocks(rdev, r1_bio->sector,
>> + r1_bio->sectors, 0);
>> + }
>
> Reuse handle_sync_write_finished() seems better.
Good point.
Thanks,
Qixing
^ permalink raw reply [flat|nested] 21+ messages in thread
end of thread, other threads:[~2026-01-19 3:06 UTC | newest]
Thread overview: 21+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-12-31 7:09 [RFC PATCH 0/5] md/raid1: introduce a new sync action to repair badblocks Zheng Qixing
2025-12-31 7:09 ` [RFC PATCH 1/5] md: add helpers for requested sync action Zheng Qixing
2026-01-06 12:59 ` Li Nan
2026-01-08 3:31 ` Zheng Qixing
2025-12-31 7:09 ` [RFC PATCH 2/5] md: clear stale sync flags when frozen before sync starts Zheng Qixing
2026-01-06 13:07 ` Li Nan
2025-12-31 7:09 ` [RFC PATCH 3/5] md: simplify sync action print in status_resync Zheng Qixing
2026-01-06 13:24 ` Li Nan
2025-12-31 7:09 ` [RFC PATCH 4/5] md: introduce MAX_RAID_DISKS macro to replace magic number Zheng Qixing
2025-12-31 18:00 ` John Stoffel
2026-01-04 2:06 ` Zheng Qixing
2025-12-31 7:09 ` [RFC PATCH 5/5] md/raid1: introduce rectify action to repair badblocks Zheng Qixing
2026-01-06 13:43 ` Li Nan
2026-01-19 2:51 ` Zheng Qixing
2026-01-14 3:11 ` Li Nan
2026-01-19 3:06 ` Zheng Qixing
2025-12-31 11:11 ` [RFC PATCH 0/5] md/raid1: introduce a new sync " Roman Mamedov
2026-01-06 2:44 ` Zheng Qixing
2026-01-06 13:50 ` Roman Mamedov
2026-01-06 15:36 ` Pascal Hambourg
2026-01-08 1:32 ` Zheng Qixing
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox