* [patch v2 01/11] MD: replace special disk roles with macros
2015-08-13 21:31 [patch v2 00/11]md: fix raid5 write hole Shaohua Li
@ 2015-08-13 21:31 ` Shaohua Li
2015-08-13 21:31 ` [patch v2 02/11] MD: add a new disk role to present write journal device Shaohua Li
` (10 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: Shaohua Li @ 2015-08-13 21:31 UTC (permalink / raw)
To: linux-raid; +Cc: Kernel-team, songliubraving, hch, dan.j.williams, neilb
From: Song Liu <songliubraving@fb.com>
Add the following two macros for special roles: spare and faulty
MD_DISK_ROLE_SPARE 0xffff
MD_DISK_ROLE_FAULTY 0xfffe
Add MD_DISK_ROLE_MAX 0xff00 as the exclusive upper bound of regular roles,
and the minimum value of a special role.
Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Shaohua Li <shli@fb.com>
---
drivers/md/md.c | 14 +++++++-------
include/uapi/linux/raid/md_p.h | 4 ++++
2 files changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e25f00f..01f9fa0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1626,7 +1626,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
++ev1;
if (rdev->desc_nr >= 0 &&
rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
- le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
+ le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX)
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
@@ -1646,14 +1646,14 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
int role;
if (rdev->desc_nr < 0 ||
rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
- role = 0xffff;
+ role = MD_DISK_ROLE_SPARE;
rdev->desc_nr = -1;
} else
role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
switch(role) {
- case 0xffff: /* spare */
+ case MD_DISK_ROLE_SPARE: /* spare */
break;
- case 0xfffe: /* faulty */
+ case MD_DISK_ROLE_FAULTY: /* faulty */
set_bit(Faulty, &rdev->flags);
break;
default:
@@ -1803,18 +1803,18 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
max_dev = le32_to_cpu(sb->max_dev);
for (i=0; i<max_dev;i++)
- sb->dev_roles[i] = cpu_to_le16(0xfffe);
+ sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
rdev_for_each(rdev2, mddev) {
i = rdev2->desc_nr;
if (test_bit(Faulty, &rdev2->flags))
- sb->dev_roles[i] = cpu_to_le16(0xfffe);
+ sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else if (rdev2->raid_disk >= 0)
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
- sb->dev_roles[i] = cpu_to_le16(0xffff);
+ sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
}
sb->sb_csum = calc_sb_1_csum(sb);
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index 2ae6131..3105190 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -90,6 +90,10 @@
* dire need
*/
+#define MD_DISK_ROLE_SPARE 0xffff
+#define MD_DISK_ROLE_FAULTY 0xfffe
+#define MD_DISK_ROLE_MAX 0xff00 /* max value of regular disk role */
+
typedef struct mdp_device_descriptor_s {
__u32 number; /* 0 Device number in the entire set */
__u32 major; /* 1 Device major number */
--
1.8.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [patch v2 02/11] MD: add a new disk role to present write journal device
2015-08-13 21:31 [patch v2 00/11]md: fix raid5 write hole Shaohua Li
2015-08-13 21:31 ` [patch v2 01/11] MD: replace special disk roles with macros Shaohua Li
@ 2015-08-13 21:31 ` Shaohua Li
2015-08-13 21:31 ` [patch v2 03/11] md: override md superblock recovery_offset for " Shaohua Li
` (9 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: Shaohua Li @ 2015-08-13 21:31 UTC (permalink / raw)
To: linux-raid; +Cc: Kernel-team, songliubraving, hch, dan.j.williams, neilb
From: Song Liu <songliubraving@fb.com>
Next patches will use a disk for raid5/6 journaling. We need a new disk
role to represent the journal device, and add MD_FEATURE_JOURNAL to the
feature_map for backward compatibility.
Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Shaohua Li <shli@fb.com>
---
drivers/md/md.c | 24 ++++++++++++++++++++++--
drivers/md/md.h | 5 +++++
include/uapi/linux/raid/md_p.h | 3 +++
3 files changed, 30 insertions(+), 2 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 01f9fa0..7667cc1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1656,6 +1656,16 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
case MD_DISK_ROLE_FAULTY: /* faulty */
set_bit(Faulty, &rdev->flags);
break;
+ case MD_DISK_ROLE_JOURNAL: /* journal device */
+ if (!(sb->feature_map & MD_FEATURE_JOURNAL)) {
+ /* journal device without journal feature */
+ printk(KERN_WARNING
+ "md: journal device provided without "
+ "journal feature, ignoring the device\n");
+ return -EINVAL;
+ }
+ set_bit(Journal, &rdev->flags);
+ break;
default:
rdev->saved_raid_disk = role;
if ((le32_to_cpu(sb->feature_map) &
@@ -1811,7 +1821,10 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
- else if (rdev2->raid_disk >= 0)
+ else if (test_bit(Journal, &rdev2->flags)) {
+ sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
+ sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
+ } else if (rdev2->raid_disk >= 0)
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
@@ -5805,7 +5818,8 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
else if (test_bit(In_sync, &rdev->flags)) {
info.state |= (1<<MD_DISK_ACTIVE);
info.state |= (1<<MD_DISK_SYNC);
- }
+ } else if (test_bit(Journal, &rdev->flags))
+ info.state |= (1<<MD_DISK_JOURNAL);
if (test_bit(WriteMostly, &rdev->flags))
info.state |= (1<<MD_DISK_WRITEMOSTLY);
} else {
@@ -5920,6 +5934,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
else
clear_bit(WriteMostly, &rdev->flags);
+ if (info->state & (1<<MD_DISK_JOURNAL))
+ set_bit(Journal, &rdev->flags);
/*
* check whether the device shows up in other nodes
*/
@@ -7288,6 +7304,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "(F)");
continue;
}
+ if (test_bit(Journal, &rdev->flags)) {
+ seq_printf(seq, "(J)");
+ continue;
+ }
if (rdev->raid_disk < 0)
seq_printf(seq, "(S)"); /* spare */
if (test_bit(Replacement, &rdev->flags))
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7da6e9c..56a4015 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -176,6 +176,11 @@ enum flag_bits {
* This device is seen locally but not
* by the whole cluster
*/
+ Journal, /* This device is used as journal for
+ * raid-5/6.
+ * Usually, this device should be faster
+ * than other devices in the array
+ */
};
#define BB_LEN_MASK (0x00000000000001FFULL)
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index 3105190..a6ec473 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -89,9 +89,11 @@
* read requests will only be sent here in
* dire need
*/
+#define MD_DISK_JOURNAL 18 /* disk is used as the write journal in RAID-5/6 */
#define MD_DISK_ROLE_SPARE 0xffff
#define MD_DISK_ROLE_FAULTY 0xfffe
+#define MD_DISK_ROLE_JOURNAL 0xfffd
#define MD_DISK_ROLE_MAX 0xff00 /* max value of regular disk role */
typedef struct mdp_device_descriptor_s {
@@ -306,6 +308,7 @@ struct mdp_superblock_1 {
#define MD_FEATURE_RECOVERY_BITMAP 128 /* recovery that is happening
* is guided by bitmap.
*/
+#define MD_FEATURE_JOURNAL 256 /* support write cache */
#define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \
|MD_FEATURE_RECOVERY_OFFSET \
|MD_FEATURE_RESHAPE_ACTIVE \
--
1.8.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [patch v2 03/11] md: override md superblock recovery_offset for journal device
2015-08-13 21:31 [patch v2 00/11]md: fix raid5 write hole Shaohua Li
2015-08-13 21:31 ` [patch v2 01/11] MD: replace special disk roles with macros Shaohua Li
2015-08-13 21:31 ` [patch v2 02/11] MD: add a new disk role to present write journal device Shaohua Li
@ 2015-08-13 21:31 ` Shaohua Li
2015-08-13 21:31 ` [patch v2 04/11] raid5: export some functions Shaohua Li
` (8 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: Shaohua Li @ 2015-08-13 21:31 UTC (permalink / raw)
To: linux-raid; +Cc: Kernel-team, songliubraving, hch, dan.j.williams, neilb
Journal device stores data in a log structure. We need to record the log
start. Here we override md superblock recovery_offset for this purpose.
This field of a journal device is meaningless otherwise.
Signed-off-by: Shaohua Li <shli@fb.com>
---
drivers/md/md.c | 6 ++++++
drivers/md/md.h | 8 +++++++-
include/uapi/linux/raid/md_p.h | 5 ++++-
3 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 7667cc1..4775029 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1665,6 +1665,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
return -EINVAL;
}
set_bit(Journal, &rdev->flags);
+ rdev->journal_tail = le64_to_cpu(sb->journal_tail);
break;
default:
rdev->saved_raid_disk = role;
@@ -1740,6 +1741,9 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
}
+ /* Note: recovery_offset and journal_tail share space */
+ if (test_bit(Journal, &rdev->flags))
+ sb->journal_tail = cpu_to_le64(rdev->journal_tail);
if (test_bit(Replacement, &rdev->flags))
sb->feature_map |=
cpu_to_le32(MD_FEATURE_REPLACEMENT);
@@ -8045,6 +8049,8 @@ static int remove_and_add_spares(struct mddev *mddev,
continue;
if (test_bit(Faulty, &rdev->flags))
continue;
+ if (test_bit(Journal, &rdev->flags))
+ continue;
if (mddev->ro &&
! (rdev->saved_raid_disk >= 0 &&
!test_bit(Bitmap_sync, &rdev->flags)))
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 56a4015..226f4ba 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -87,10 +87,16 @@ struct md_rdev {
* array and could again if we did a partial
* resync from the bitmap
*/
- sector_t recovery_offset;/* If this device has been partially
+ union {
+ sector_t recovery_offset;/* If this device has been partially
* recovered, this is where we were
* up to.
*/
+ sector_t journal_tail; /* If this device is a journal device,
+ * this is the journal tail (journal
+ * recovery start point)
+ */
+ };
atomic_t nr_pending; /* number of pending requests.
* only maintained for arrays that
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index a6ec473..d66387f 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -258,7 +258,10 @@ struct mdp_superblock_1 {
__le64 data_offset; /* sector start of data, often 0 */
__le64 data_size; /* sectors in this device that can be used for data */
__le64 super_offset; /* sector start of this superblock */
- __le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */
+ union {
+ __le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */
+ __le64 journal_tail;/* journal tail of journal device (from data_offset) */
+ };
__le32 dev_number; /* permanent identifier of this device - not role in raid */
__le32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */
__u8 device_uuid[16]; /* user-space setable, ignored by kernel */
--
1.8.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [patch v2 04/11] raid5: export some functions
2015-08-13 21:31 [patch v2 00/11]md: fix raid5 write hole Shaohua Li
` (2 preceding siblings ...)
2015-08-13 21:31 ` [patch v2 03/11] md: override md superblock recovery_offset for " Shaohua Li
@ 2015-08-13 21:31 ` Shaohua Li
2015-08-13 21:31 ` [patch v2 05/11] raid5: add a new state for stripe log handling Shaohua Li
` (7 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: Shaohua Li @ 2015-08-13 21:31 UTC (permalink / raw)
To: linux-raid; +Cc: Kernel-team, songliubraving, hch, dan.j.williams, neilb
Next several patches use some raid5 functions; rename them with a raid5
prefix and export them.
Signed-off-by: Shaohua Li <shli@fb.com>
---
drivers/md/raid5.c | 94 ++++++++++++++++++++++++++----------------------------
drivers/md/raid5.h | 8 +++++
2 files changed, 54 insertions(+), 48 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f757023..786f811 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -357,7 +357,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
struct list_head *list = &temp_inactive_list[size - 1];
/*
- * We don't hold any lock here yet, get_active_stripe() might
+ * We don't hold any lock here yet, raid5_get_active_stripe() might
* remove stripes from the list
*/
if (!list_empty_careful(list)) {
@@ -417,7 +417,7 @@ static int release_stripe_list(struct r5conf *conf,
return count;
}
-static void release_stripe(struct stripe_head *sh)
+void raid5_release_stripe(struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
unsigned long flags;
@@ -662,8 +662,8 @@ static int has_failed(struct r5conf *conf)
return 0;
}
-static struct stripe_head *
-get_active_stripe(struct r5conf *conf, sector_t sector,
+struct stripe_head *
+raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
int previous, int noblock, int noquiesce)
{
struct stripe_head *sh;
@@ -862,7 +862,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
unlock_out:
unlock_two_stripes(head, sh);
out:
- release_stripe(head);
+ raid5_release_stripe(head);
}
/* Determine if 'data_offset' or 'new_data_offset' should be used
@@ -1214,7 +1214,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
return_io(return_bi);
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
static void ops_run_biofill(struct stripe_head *sh)
@@ -1277,7 +1277,7 @@ static void ops_complete_compute(void *stripe_head_ref)
if (sh->check_state == check_state_compute_run)
sh->check_state = check_state_compute_result;
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
/* return a pointer to the address conversion region of the scribble buffer */
@@ -1703,7 +1703,7 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
}
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
static void
@@ -1861,7 +1861,7 @@ static void ops_complete_check(void *stripe_head_ref)
sh->check_state = check_state_check_result;
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
@@ -2023,7 +2023,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
/* we just created an active stripe so... */
atomic_inc(&conf->active_stripes);
- release_stripe(sh);
+ raid5_release_stripe(sh);
conf->max_nr_stripes++;
return 1;
}
@@ -2242,7 +2242,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
if (!p)
err = -ENOMEM;
}
- release_stripe(nsh);
+ raid5_release_stripe(nsh);
}
/* critical section pass, GFP_NOIO no longer needed */
@@ -2402,7 +2402,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
rdev_dec_pending(rdev, conf->mddev);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
static void raid5_end_write_request(struct bio *bi, int error)
@@ -2477,14 +2477,12 @@ static void raid5_end_write_request(struct bio *bi, int error)
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
if (sh->batch_head && sh != sh->batch_head)
- release_stripe(sh->batch_head);
+ raid5_release_stripe(sh->batch_head);
}
-static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
-
static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
struct r5dev *dev = &sh->dev[i];
@@ -2500,7 +2498,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
dev->rreq.bi_private = sh;
dev->flags = 0;
- dev->sector = compute_blocknr(sh, i, previous);
+ dev->sector = raid5_compute_blocknr(sh, i, previous);
}
static void error(struct mddev *mddev, struct md_rdev *rdev)
@@ -2532,7 +2530,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
* Input: a 'big' sector number,
* Output: index of the data and parity disk, and the sector # in them.
*/
-static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
+sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
int previous, int *dd_idx,
struct stripe_head *sh)
{
@@ -2734,7 +2732,7 @@ static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
return new_sector;
}
-static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
+sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
{
struct r5conf *conf = sh->raid_conf;
int raid_disks = sh->disks;
@@ -3943,10 +3941,10 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
struct stripe_head *sh2;
struct async_submit_ctl submit;
- sector_t bn = compute_blocknr(sh, i, 1);
+ sector_t bn = raid5_compute_blocknr(sh, i, 1);
sector_t s = raid5_compute_sector(conf, bn, 0,
&dd_idx, NULL);
- sh2 = get_active_stripe(conf, s, 0, 1, 1);
+ sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1);
if (sh2 == NULL)
/* so far only the early blocks of this stripe
* have been requested. When later blocks
@@ -3956,7 +3954,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
/* must have already done this block */
- release_stripe(sh2);
+ raid5_release_stripe(sh2);
continue;
}
@@ -3977,7 +3975,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
set_bit(STRIPE_EXPAND_READY, &sh2->state);
set_bit(STRIPE_HANDLE, &sh2->state);
}
- release_stripe(sh2);
+ raid5_release_stripe(sh2);
}
/* done submitting copies, wait for them to complete */
@@ -4263,7 +4261,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
if (handle_flags == 0 ||
sh->state & handle_flags)
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
spin_lock_irq(&head_sh->stripe_lock);
head_sh->batch_head = NULL;
@@ -4510,7 +4508,7 @@ static void handle_stripe(struct stripe_head *sh)
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
struct stripe_head *sh_src
- = get_active_stripe(conf, sh->sector, 1, 1, 1);
+ = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1);
if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
/* sh cannot be written until sh_src has been read.
* so arrange for sh to be delayed a little
@@ -4520,11 +4518,11 @@ static void handle_stripe(struct stripe_head *sh)
if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
&sh_src->state))
atomic_inc(&conf->preread_active_stripes);
- release_stripe(sh_src);
+ raid5_release_stripe(sh_src);
goto finish;
}
if (sh_src)
- release_stripe(sh_src);
+ raid5_release_stripe(sh_src);
sh->reconstruct_state = reconstruct_state_idle;
clear_bit(STRIPE_EXPANDING, &sh->state);
@@ -5033,7 +5031,7 @@ static void release_stripe_plug(struct mddev *mddev,
struct raid5_plug_cb *cb;
if (!blk_cb) {
- release_stripe(sh);
+ raid5_release_stripe(sh);
return;
}
@@ -5049,7 +5047,7 @@ static void release_stripe_plug(struct mddev *mddev,
if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
list_add_tail(&sh->lru, &cb->list);
else
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
static void make_discard_request(struct mddev *mddev, struct bio *bi)
@@ -5084,12 +5082,12 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
DEFINE_WAIT(w);
int d;
again:
- sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
+ sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0);
prepare_to_wait(&conf->wait_for_overlap, &w,
TASK_UNINTERRUPTIBLE);
set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
if (test_bit(STRIPE_SYNCING, &sh->state)) {
- release_stripe(sh);
+ raid5_release_stripe(sh);
schedule();
goto again;
}
@@ -5101,7 +5099,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
if (sh->dev[d].towrite || sh->dev[d].toread) {
set_bit(R5_Overlap, &sh->dev[d].flags);
spin_unlock_irq(&sh->stripe_lock);
- release_stripe(sh);
+ raid5_release_stripe(sh);
schedule();
goto again;
}
@@ -5229,7 +5227,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
(unsigned long long)new_sector,
(unsigned long long)logical_sector);
- sh = get_active_stripe(conf, new_sector, previous,
+ sh = raid5_get_active_stripe(conf, new_sector, previous,
(bi->bi_rw&RWA_MASK), 0);
if (sh) {
if (unlikely(previous)) {
@@ -5250,7 +5248,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
must_retry = 1;
spin_unlock_irq(&conf->device_lock);
if (must_retry) {
- release_stripe(sh);
+ raid5_release_stripe(sh);
schedule();
do_prepare = true;
goto retry;
@@ -5260,14 +5258,14 @@ static void make_request(struct mddev *mddev, struct bio * bi)
/* Might have got the wrong stripe_head
* by accident
*/
- release_stripe(sh);
+ raid5_release_stripe(sh);
goto retry;
}
if (rw == WRITE &&
logical_sector >= mddev->suspend_lo &&
logical_sector < mddev->suspend_hi) {
- release_stripe(sh);
+ raid5_release_stripe(sh);
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -5290,7 +5288,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
* and wait a while
*/
md_wakeup_thread(mddev->thread);
- release_stripe(sh);
+ raid5_release_stripe(sh);
schedule();
do_prepare = true;
goto retry;
@@ -5468,7 +5466,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
int j;
int skipped_disk = 0;
- sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
+ sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
set_bit(STRIPE_EXPANDING, &sh->state);
atomic_inc(&conf->reshape_stripes);
/* If any of this stripe is beyond the end of the old
@@ -5481,7 +5479,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
if (conf->level == 6 &&
j == sh->qd_idx)
continue;
- s = compute_blocknr(sh, j, 0);
+ s = raid5_compute_blocknr(sh, j, 0);
if (s < raid5_size(mddev, 0, 0)) {
skipped_disk = 1;
continue;
@@ -5517,10 +5515,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
if (last_sector >= mddev->dev_sectors)
last_sector = mddev->dev_sectors - 1;
while (first_sector <= last_sector) {
- sh = get_active_stripe(conf, first_sector, 1, 0, 1);
+ sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1);
set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
first_sector += STRIPE_SECTORS;
}
/* Now that the sources are clearly marked, we can release
@@ -5529,7 +5527,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
while (!list_empty(&stripes)) {
sh = list_entry(stripes.next, struct stripe_head, lru);
list_del_init(&sh->lru);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
/* If this takes us to the resync_max point where we have to pause,
* then we need to write out the superblock.
@@ -5624,9 +5622,9 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
- sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
+ sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
if (sh == NULL) {
- sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
+ sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
/* make sure we don't swamp the stripe cache if someone else
* is trying to get access
*/
@@ -5650,7 +5648,7 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
return STRIPE_SECTORS;
}
@@ -5689,7 +5687,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
/* already done this stripe */
continue;
- sh = get_active_stripe(conf, sector, 0, 1, 1);
+ sh = raid5_get_active_stripe(conf, sector, 0, 1, 1);
if (!sh) {
/* failed to get a stripe - must wait */
@@ -5699,7 +5697,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
}
if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
- release_stripe(sh);
+ raid5_release_stripe(sh);
raid5_set_bi_processed_stripes(raid_bio, scnt);
conf->retry_read_aligned = raid_bio;
return handled;
@@ -5707,7 +5705,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
handle_stripe(sh);
- release_stripe(sh);
+ raid5_release_stripe(sh);
handled++;
}
remaining = raid5_dec_bi_active_stripes(raid_bio);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index d051442..d8785df 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -606,4 +606,12 @@ static inline int algorithm_is_DDF(int layout)
extern void md_raid5_kick_device(struct r5conf *conf);
extern int raid5_set_cache_size(struct mddev *mddev, int size);
+extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
+extern void raid5_release_stripe(struct stripe_head *sh);
+extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
+ int previous, int *dd_idx,
+ struct stripe_head *sh);
+extern struct stripe_head *
+raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
+ int previous, int noblock, int noquiesce);
#endif
--
1.8.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [patch v2 05/11] raid5: add a new state for stripe log handling
2015-08-13 21:31 [patch v2 00/11]md: fix raid5 write hole Shaohua Li
` (3 preceding siblings ...)
2015-08-13 21:31 ` [patch v2 04/11] raid5: export some functions Shaohua Li
@ 2015-08-13 21:31 ` Shaohua Li
2015-08-13 21:31 ` [patch v2 06/11] raid5: add basic stripe log Shaohua Li
` (6 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: Shaohua Li @ 2015-08-13 21:31 UTC (permalink / raw)
To: linux-raid; +Cc: Kernel-team, songliubraving, hch, dan.j.williams, neilb
When a stripe finishes construction, we write the stripe to raid in
ops_run_io normally. With log, we do a bunch of other operations before
the stripe is written to raid. Mainly write the stripe to log disk,
flush disk cache and so on. The operations are still driven by raid5d
and run in the stripe state machine. We introduce a new state for such
stripe (trapped into log). The stripe is in this state from the time it
first enters ops_run_io (finish construction) to the time it is written
to raid. Since we know the state is only for log, we bypass other
check/operation in handle_stripe.
Signed-off-by: Shaohua Li <shli@fb.com>
---
drivers/md/raid5.c | 3 +++
drivers/md/raid5.h | 1 +
2 files changed, 4 insertions(+)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 786f811..bbdadee 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4322,6 +4322,9 @@ static void handle_stripe(struct stripe_head *sh)
analyse_stripe(sh, &s);
+ if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
+ goto finish;
+
if (s.handle_bad_blocks) {
set_bit(STRIPE_HANDLE, &sh->state);
goto finish;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index d8785df..6e85b82 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -340,6 +340,7 @@ enum {
STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
* to batch yet.
*/
+ STRIPE_LOG_TRAPPED, /* trapped into log */
};
#define STRIPE_EXPAND_SYNC_FLAGS \
--
1.8.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [patch v2 06/11] raid5: add basic stripe log
2015-08-13 21:31 [patch v2 00/11]md: fix raid5 write hole Shaohua Li
` (4 preceding siblings ...)
2015-08-13 21:31 ` [patch v2 05/11] raid5: add a new state for stripe log handling Shaohua Li
@ 2015-08-13 21:31 ` Shaohua Li
2015-08-13 21:32 ` [patch v2 07/11] raid5: log reclaim support Shaohua Li
` (5 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: Shaohua Li @ 2015-08-13 21:31 UTC (permalink / raw)
To: linux-raid; +Cc: Kernel-team, songliubraving, hch, dan.j.williams, neilb
This introduces a simple log for raid5. Data/parity written to the raid
array is first written to the log, then written to the raid array disks. If a
crash happens, we can recover the data from the log. This can speed up raid
resync and fix the write hole issue.
The log structure is pretty simple. Data/meta data is stored in block
unit, which is 4k generally. It has only one type of meta data block.
The meta data block can track 3 types of data, stripe data, stripe
parity and flush block. MD superblock will point to the last valid meta
data block. Each meta data block has checksum/seq number, so recovery
can scan the log correctly. We store a checksum of stripe data/parity to
the metadata block, so meta data and stripe data/parity can be written
to the log disk together. Otherwise, the meta data write must wait till the
stripe data/parity write is finished.
For stripe data, meta data block will record stripe data sector and
size. Currently the size is always 4k. This meta data record can be made
simpler if we just fix write hole (eg, we can record data of a stripe's
different disks together), but this format can be extended to support
caching in the future, which must record data address/size.
For stripe parity, the meta data block will record the stripe sector. Its
size should be 4k (for raid5) or 8k (for raid6). We always store the p parity
first. This format should work for caching too.
A flush block indicates that a stripe is in the raid array disks. Fixing the
write hole doesn't need this type of meta data; it's for the caching extension.
Signed-off-by: Shaohua Li <shli@fb.com>
---
drivers/md/Makefile | 2 +-
drivers/md/raid5-cache.c | 604 +++++++++++++++++++++++++++++++++++++++++
drivers/md/raid5.c | 4 +
drivers/md/raid5.h | 9 +
include/uapi/linux/raid/md_p.h | 57 ++++
5 files changed, 675 insertions(+), 1 deletion(-)
create mode 100644 drivers/md/raid5-cache.c
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 462f443..f34979c 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -17,7 +17,7 @@ dm-cache-smq-y += dm-cache-policy-smq.o
dm-cache-cleaner-y += dm-cache-policy-cleaner.o
dm-era-y += dm-era-target.o
md-mod-y += md.o bitmap.o
-raid456-y += raid5.o
+raid456-y += raid5.o raid5-cache.o
# Note: link order is important. All raid personalities
# and must come before md.o, as they each initialise
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
new file mode 100644
index 0000000..50328ee
--- /dev/null
+++ b/drivers/md/raid5-cache.c
@@ -0,0 +1,604 @@
+/*
+ * Copyright (C) 2015 Shaohua Li <shli@fb.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/raid/md_p.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include "md.h"
+#include "raid5.h"
+
+/*
+ * metadata/data stored in disk with 4k size unit (a block) regardless of
+ * the underlying hardware sector size. Only works with PAGE_SIZE == 4096
+ * */
+#define BLOCK_SECTORS (8)
+
+struct r5l_log {
+ struct md_rdev *rdev;
+
+ u32 uuid_checksum;
+
+ sector_t device_size; /* log device size, round to BLOCK_SECTORS */
+
+ sector_t last_checkpoint; /* log tail. where recovery scan starts from */
+ u64 last_cp_seq; /* log tail sequence */
+
+ sector_t log_start; /* log head. where new data appends */
+ u64 seq; /* log head sequence */
+
+ struct mutex io_mutex;
+ struct r5l_io_unit *current_io; /* current io_unit accepting new data */
+
+ spinlock_t io_list_lock;
+ struct list_head running_ios; /* io_units which are still running,
+ * and have not yet been completely
+ * written to the log */
+ struct list_head io_end_ios; /* io_units which have been completely
+ * written to the log but not yet written
+ * to the RAID */
+
+ struct kmem_cache *io_kc;
+
+ struct list_head no_space_stripes; /* pending stripes, log has no space */
+ spinlock_t no_space_stripes_lock;
+};
+
+/*
+ * an IO range starts from a meta data block and ends at the next meta data
+ * block. The io unit's meta data block tracks the data/parity following it. io
+ * unit is written to log disk with normal write, as we always flush log disk
+ * first and then start moving data to raid disks, there is no requirement to
+ * write io unit with FLUSH/FUA
+ * */
+struct r5l_io_unit {
+ struct r5l_log *log;
+
+ struct page *meta_page; /* store meta block */
+ int meta_offset; /* current offset in meta_page */
+
+ struct bio_list bios;
+ atomic_t pending_io; /* pending bios not written to log yet */
+ struct bio *current_bio; /* current_bio accepting new data */
+
+ atomic_t pending_stripe; /* how many stripes not flushed to raid */
+ u64 seq; /* seq number of the metablock */
+ sector_t log_start; /* where the io_unit starts */
+ sector_t log_end; /* where the io_unit ends */
+ struct list_head log_sibling; /* log->running_ios */
+ struct list_head stripe_list; /* stripes added to the io_unit */
+
+ int state;
+ wait_queue_head_t wait_state;
+};
+
+/* r5l_io_unit state */
+enum r5l_io_unit_state {
+ IO_UNIT_RUNNING = 0, /* accepting new IO */
+ IO_UNIT_IO_START = 1, /* io_unit bio started writing to log,
+ * doesn't accept new bio */
+ IO_UNIT_IO_END = 2, /* io_unit bio finished writing to log */
+ IO_UNIT_STRIPE_START = 3, /* stripes of io_unit are flushing to raid */
+ IO_UNIT_STRIPE_END = 4, /* stripes data finished writing to raid */
+};
+
+static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
+{
+ start += inc;
+ if (start >= log->device_size)
+ start = start - log->device_size;
+ return start;
+}
+
+static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
+ sector_t end)
+{
+ if (end >= start)
+ return end - start;
+ else
+ return end + log->device_size - start;
+}
+
+static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
+{
+ sector_t used_size;
+
+ used_size = r5l_ring_distance(log, log->last_checkpoint,
+ log->log_start);
+
+ return log->device_size > used_size + size;
+}
+
+static struct r5l_io_unit *r5l_alloc_io_unit(struct r5l_log *log)
+{
+ struct r5l_io_unit *io;
+ /* We can't handle memory allocation failure so far */
+ gfp_t gfp = GFP_NOIO | __GFP_NOFAIL;
+
+ io = kmem_cache_zalloc(log->io_kc, gfp);
+ io->log = log;
+ io->meta_page = alloc_page(gfp | __GFP_ZERO);
+
+ bio_list_init(&io->bios);
+ INIT_LIST_HEAD(&io->log_sibling);
+ INIT_LIST_HEAD(&io->stripe_list);
+ io->state = IO_UNIT_RUNNING;
+ init_waitqueue_head(&io->wait_state);
+ return io;
+}
+
+static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
+{
+ __free_page(io->meta_page);
+ kmem_cache_free(log->io_kc, io);
+}
+
+static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
+ enum r5l_io_unit_state state)
+{
+ struct r5l_io_unit *io;
+
+ while (!list_empty(from)) {
+ io = list_first_entry(from, struct r5l_io_unit, log_sibling);
+ /* don't change list order */
+ if (io->state >= state)
+ list_move_tail(&io->log_sibling, to);
+ else
+ break;
+ }
+}
+
+static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
+ enum r5l_io_unit_state state)
+{
+ struct r5l_log *log = io->log;
+
+ if (WARN_ON(io->state >= state))
+ return;
+ io->state = state;
+ if (state == IO_UNIT_IO_END)
+ r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
+ IO_UNIT_IO_END);
+ wake_up(&io->wait_state);
+}
+
+static void r5l_set_io_unit_state(struct r5l_io_unit *io,
+ enum r5l_io_unit_state state)
+{
+ struct r5l_log *log = io->log;
+ unsigned long flags;
+
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ __r5l_set_io_unit_state(io, state);
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+}
+
+static void r5l_log_endio(struct bio *bio, int error)
+{
+ struct r5l_io_unit *io = bio->bi_private;
+ struct r5l_log *log = io->log;
+
+ bio_put(bio);
+
+ if (!atomic_dec_and_test(&io->pending_io))
+ return;
+
+ r5l_set_io_unit_state(io, IO_UNIT_IO_END);
+ md_wakeup_thread(log->rdev->mddev->thread);
+}
+
+static void r5l_submit_current_io(struct r5l_log *log)
+{
+ struct r5l_io_unit *io = log->current_io;
+ struct r5l_meta_block *block;
+ struct bio *bio;
+ u32 crc;
+
+ if (!io)
+ return;
+
+ block = page_address(io->meta_page);
+ block->meta_size = cpu_to_le32(io->meta_offset);
+ crc = crc32_le(log->uuid_checksum, (void *)block, PAGE_SIZE);
+ block->checksum = cpu_to_le32(crc);
+
+ log->current_io = NULL;
+ r5l_set_io_unit_state(io, IO_UNIT_IO_START);
+
+ while ((bio = bio_list_pop(&io->bios))) {
+ /* all IO must start from rdev->data_offset */
+ bio->bi_iter.bi_sector += log->rdev->data_offset;
+ submit_bio(WRITE, bio);
+ }
+}
+
+static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
+{
+ struct r5l_io_unit *io;
+ struct r5l_meta_block *block;
+ struct bio *bio;
+
+ io = r5l_alloc_io_unit(log);
+
+ block = page_address(io->meta_page);
+ block->magic = cpu_to_le32(R5LOG_MAGIC);
+ block->version = R5LOG_VERSION;
+ block->seq = cpu_to_le64(log->seq);
+ block->position = cpu_to_le64(log->log_start);
+
+ io->log_start = log->log_start;
+ io->meta_offset = sizeof(struct r5l_meta_block);
+ io->seq = log->seq;
+
+ bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL,
+ bio_get_nr_vecs(log->rdev->bdev));
+ io->current_bio = bio;
+ bio->bi_rw = WRITE;
+ bio->bi_bdev = log->rdev->bdev;
+ bio->bi_iter.bi_sector = log->log_start;
+ bio_add_page(bio, io->meta_page, PAGE_SIZE, 0);
+ bio->bi_end_io = r5l_log_endio;
+ bio->bi_private = io;
+
+ bio_list_add(&io->bios, bio);
+ atomic_inc(&io->pending_io);
+
+ log->seq++;
+ log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
+ io->log_end = log->log_start;
+ /* current bio hit disk end */
+ if (log->log_start == 0)
+ io->current_bio = NULL;
+
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&io->log_sibling, &log->running_ios);
+ spin_unlock_irq(&log->io_list_lock);
+
+ return io;
+}
+
+static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
+{
+ struct r5l_io_unit *io;
+
+ io = log->current_io;
+ if (io && io->meta_offset + payload_size > PAGE_SIZE)
+ r5l_submit_current_io(log);
+ io = log->current_io;
+ if (io)
+ return 0;
+
+ log->current_io = r5l_new_meta(log);
+ return 0;
+}
+
+static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
+ sector_t location, u32 checksum1, u32 checksum2,
+ bool checksum2_valid)
+{
+ struct r5l_io_unit *io = log->current_io;
+ struct r5l_payload_data_parity *payload;
+
+ payload = page_address(io->meta_page) + io->meta_offset;
+ payload->header.type = cpu_to_le16(type);
+ payload->header.flags = cpu_to_le16(0);
+ payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
+ (PAGE_SHIFT - 9));
+ payload->location = cpu_to_le64(location);
+ payload->checksum[0] = cpu_to_le32(checksum1);
+ if (checksum2_valid)
+ payload->checksum[1] = cpu_to_le32(checksum2);
+
+ io->meta_offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) * (1 + !!checksum2_valid);
+}
+
+static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
+{
+ struct r5l_io_unit *io = log->current_io;
+
+alloc_bio:
+ if (!io->current_bio) {
+ struct bio *bio;
+ bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL,
+ bio_get_nr_vecs(log->rdev->bdev));
+ bio->bi_rw = WRITE;
+ bio->bi_bdev = log->rdev->bdev;
+ bio->bi_iter.bi_sector = log->log_start;
+ bio->bi_end_io = r5l_log_endio;
+ bio->bi_private = io;
+ bio_list_add(&io->bios, bio);
+ atomic_inc(&io->pending_io);
+ io->current_bio = bio;
+ }
+ if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) {
+ io->current_bio = NULL;
+ goto alloc_bio;
+ }
+ log->log_start = r5l_ring_add(log, log->log_start,
+ BLOCK_SECTORS);
+ /* current bio hit disk end */
+ if (log->log_start == 0)
+ io->current_bio = NULL;
+
+ io->log_end = log->log_start;
+}
+
+static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
+ int data_pages, int parity_pages)
+{
+ int i;
+ int meta_size;
+ struct r5l_io_unit *io;
+
+ meta_size = (sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
+ * data_pages +
+ sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) * parity_pages;
+
+ r5l_get_meta(log, meta_size);
+ io = log->current_io;
+
+ for (i = 0; i < sh->disks; i++) {
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ continue;
+ if (i == sh->pd_idx || i == sh->qd_idx)
+ continue;
+ r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
+ raid5_compute_blocknr(sh, i, 0),
+ sh->dev[i].log_checksum, 0, false);
+ r5l_append_payload_page(log, sh->dev[i].page);
+ }
+
+ if (sh->qd_idx >= 0) {
+ r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
+ sh->sector, sh->dev[sh->pd_idx].log_checksum,
+ sh->dev[sh->qd_idx].log_checksum, true);
+ r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
+ r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
+ } else {
+ r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
+ sh->sector, sh->dev[sh->pd_idx].log_checksum,
+ 0, false);
+ r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
+ }
+
+ list_add_tail(&sh->log_list, &io->stripe_list);
+ atomic_inc(&io->pending_stripe);
+ sh->log_io = io;
+}
+
+/*
+ * running in raid5d, where reclaim could wait for raid5d too (when it flushes
+ * data from log to raid disks), so we shouldn't wait for reclaim here
+ * */
+int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
+{
+ int write_disks = 0;
+ int data_pages, parity_pages;
+ int meta_size;
+ int reserve;
+ int i;
+
+ if (!log)
+ return -EAGAIN;
+ /* Don't support stripe batch */
+ if (sh->log_io ||!test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
+ test_bit(STRIPE_SYNCING, &sh->state)) {
+ /* the stripe is written to log, we start writing it to raid */
+ clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
+ return -EAGAIN;
+ }
+
+ for (i = 0; i < sh->disks; i++) {
+ void *addr;
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ continue;
+ write_disks++;
+ /* checksum is already calculated in last run */
+ if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
+ continue;
+ addr = kmap_atomic(sh->dev[i].page);
+ sh->dev[i].log_checksum = crc32_le(log->uuid_checksum,
+ addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ }
+ parity_pages = 1 + !!(sh->qd_idx >= 0);
+ data_pages = write_disks - parity_pages;
+
+ meta_size = (sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
+ * data_pages +
+ sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) * parity_pages;
+ /* Doesn't work with very big raid array */
+ if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
+ return -EINVAL;
+
+ set_bit(STRIPE_LOG_TRAPPED, &sh->state);
+ atomic_inc(&sh->count);
+
+ mutex_lock(&log->io_mutex);
+ /* meta + data */
+ reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
+ if (r5l_has_free_space(log, reserve))
+ r5l_log_stripe(log, sh, data_pages, parity_pages);
+ else {
+ spin_lock(&log->no_space_stripes_lock);
+ list_add_tail(&sh->log_list, &log->no_space_stripes);
+ spin_unlock(&log->no_space_stripes_lock);
+
+ r5l_wake_reclaim(log, reserve);
+ }
+ mutex_unlock(&log->io_mutex);
+
+ return 0;
+}
+
+void r5l_write_stripe_run(struct r5l_log *log)
+{
+ if (!log)
+ return;
+ mutex_lock(&log->io_mutex);
+ r5l_submit_current_io(log);
+ mutex_unlock(&log->io_mutex);
+}
+
+/* This will run after log space is reclaimed */
+static void r5l_run_no_space_stripes(struct r5l_log *log)
+{
+ struct stripe_head *sh;
+
+ spin_lock(&log->no_space_stripes_lock);
+ while (!list_empty(&log->no_space_stripes)) {
+ sh = list_first_entry(&log->no_space_stripes,
+ struct stripe_head, log_list);
+ list_del_init(&sh->log_list);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ raid5_release_stripe(sh);
+ }
+ spin_unlock(&log->no_space_stripes_lock);
+}
+
+static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
+{
+ /* will implement later */
+}
+
+static int r5l_recovery_log(struct r5l_log *log)
+{
+ /* fake recovery */
+ log->seq = log->last_cp_seq + 1;
+ log->log_start = r5l_ring_add(log, log->last_checkpoint, BLOCK_SECTORS);
+ return 0;
+}
+
+static void r5l_write_super(struct r5l_log *log, sector_t cp)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ log->rdev->journal_tail = cp;
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+}
+
+static int r5l_load_log(struct r5l_log *log)
+{
+ struct md_rdev *rdev = log->rdev;
+ struct page *page;
+ struct r5l_meta_block *mb;
+ sector_t cp = log->rdev->journal_tail;
+ u32 stored_crc, expected_crc;
+ bool create_super = false;
+ int ret;
+
+ /* Make sure it's valid */
+ if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
+ cp = 0;
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
+ ret = -EIO;
+ goto ioerr;
+ }
+ mb = page_address(page);
+
+ if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
+ mb->version != R5LOG_VERSION) {
+ create_super = true;
+ goto create;
+ }
+ stored_crc = le32_to_cpu(mb->checksum);
+ mb->checksum = 0;
+ expected_crc = crc32_le(log->uuid_checksum, (void *)mb, PAGE_SIZE);
+ if (stored_crc != expected_crc) {
+ create_super = true;
+ goto create;
+ }
+ if (le64_to_cpu(mb->position) != cp) {
+ create_super = true;
+ goto create;
+ }
+create:
+ if (create_super) {
+ log->last_cp_seq = prandom_u32();
+ cp = 0;
+ /*
+ * Make sure super points to correct address. Log might have
+ * data very soon. If super hasn't correct log tail address,
+ * recovery can't find the log
+ * */
+ r5l_write_super(log, cp);
+ } else
+ log->last_cp_seq = le64_to_cpu(mb->seq);
+
+ log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
+ log->last_checkpoint = cp;
+
+ __free_page(page);
+
+ return r5l_recovery_log(log);
+ioerr:
+ __free_page(page);
+ return ret;
+}
+
+int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
+{
+ struct r5l_log *log;
+
+ if (PAGE_SIZE != 4096)
+ return -EINVAL;
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+ log->rdev = rdev;
+
+ log->uuid_checksum = crc32_le(~0, (void *)rdev->mddev->uuid,
+ sizeof(rdev->mddev->uuid));
+
+ mutex_init(&log->io_mutex);
+
+ spin_lock_init(&log->io_list_lock);
+ INIT_LIST_HEAD(&log->running_ios);
+
+ log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
+ if (!log->io_kc)
+ goto io_kc;
+
+ INIT_LIST_HEAD(&log->no_space_stripes);
+ spin_lock_init(&log->no_space_stripes_lock);
+
+ if (r5l_load_log(log))
+ goto error;
+
+ conf->log = log;
+ return 0;
+error:
+ kmem_cache_destroy(log->io_kc);
+io_kc:
+ kfree(log);
+ return -EINVAL;
+}
+
+void r5l_exit_log(struct r5l_log *log)
+{
+ kmem_cache_destroy(log->io_kc);
+ kfree(log);
+}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index bbdadee..fb8a811 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -899,6 +899,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
might_sleep();
+ if (r5l_write_stripe(conf->log, sh) == 0)
+ return;
for (i = disks; i--; ) {
int rw;
int replace_only = 0;
@@ -3501,6 +3503,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
WARN_ON(dev->page != dev->orig_page);
}
+
if (!discard_pending &&
test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
@@ -5754,6 +5757,7 @@ static int handle_active_stripes(struct r5conf *conf, int group,
for (i = 0; i < batch_size; i++)
handle_stripe(batch[i]);
+ r5l_write_stripe_run(conf->log);
cond_resched();
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 6e85b82..ff666eb 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -223,6 +223,9 @@ struct stripe_head {
struct stripe_head *batch_head; /* protected by stripe lock */
spinlock_t batch_lock; /* only header's lock is useful */
struct list_head batch_list; /* protected by head's batch lock*/
+
+ struct r5l_io_unit *log_io;
+ struct list_head log_list;
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
@@ -244,6 +247,7 @@ struct stripe_head {
struct bio *toread, *read, *towrite, *written;
sector_t sector; /* sector of this page */
unsigned long flags;
+ u32 log_checksum;
} dev[1]; /* allocated with extra space depending of RAID geometry */
};
@@ -541,6 +545,7 @@ struct r5conf {
struct r5worker_group *worker_groups;
int group_cnt;
int worker_cnt_per_group;
+ struct r5l_log *log;
};
@@ -615,4 +620,8 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
int previous, int noblock, int noquiesce);
+extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
+extern void r5l_exit_log(struct r5l_log *log);
+extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
+extern void r5l_write_stripe_run(struct r5l_log *log);
#endif
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index d66387f..3553a72 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -322,4 +322,61 @@ struct mdp_superblock_1 {
|MD_FEATURE_RECOVERY_BITMAP \
)
+struct r5l_payload_header {
+ __le16 type;
+ __le16 flags;
+} __attribute__ ((__packed__));
+
+enum r5l_payload_type {
+ R5LOG_PAYLOAD_DATA = 0,
+ R5LOG_PAYLOAD_PARITY = 1,
+ R5LOG_PAYLOAD_FLUSH = 2,
+};
+
+struct r5l_payload_data_parity {
+ struct r5l_payload_header header;
+ __le32 size; /* sector. data/parity size. each 4k has a checksum */
+ __le64 location; /* sector. For data, it's raid sector. For
+ parity, it's stripe sector */
+ __le32 checksum[];
+} __attribute__ ((__packed__));
+
+enum r5l_payload_data_parity_flag {
+ R5LOG_PAYLOAD_FLAG_DISCARD = 1, /* payload is discard */
+ /*
+ * RESHAPED/RESHAPING is only set when there is reshape activity. Note,
+ * both data/parity of a stripe should have the same flag set
+ *
+ * RESHAPED: reshape is running, and this stripe finished reshape
+ * RESHAPING: reshape is running, and this stripe isn't reshaped
+ * */
+ R5LOG_PAYLOAD_FLAG_RESHAPED = 2,
+ R5LOG_PAYLOAD_FLAG_RESHAPING = 3,
+};
+
+struct r5l_payload_flush {
+ struct r5l_payload_header header;
+ __le32 size; /* flush_stripes size, bytes */
+ __le64 flush_stripes[];
+} __attribute__ ((__packed__));
+
+enum r5l_payload_flush_flag {
+ R5LOG_PAYLOAD_FLAG_FLUSH_STRIPE = 1, /* data represents whole stripe */
+};
+
+struct r5l_meta_block {
+ __le32 magic;
+ __le32 checksum;
+ __u8 version;
+ __u8 __zero_pading_1;
+ __le16 __zero_pading_2;
+ __le32 meta_size; /* whole size of the block */
+
+ __le64 seq;
+ __le64 position; /* sector, start from rdev->data_offset, current position */
+ struct r5l_payload_header payloads[];
+} __attribute__ ((__packed__));
+
+#define R5LOG_VERSION 0x1
+#define R5LOG_MAGIC 0x6433c509
#endif
--
1.8.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [patch v2 07/11] raid5: log reclaim support
2015-08-13 21:31 [patch v2 00/11]md: fix raid5 write hole Shaohua Li
` (5 preceding siblings ...)
2015-08-13 21:31 ` [patch v2 06/11] raid5: add basic stripe log Shaohua Li
@ 2015-08-13 21:32 ` Shaohua Li
2015-08-13 21:32 ` [patch v2 08/11] raid5: log recovery Shaohua Li
` (4 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: Shaohua Li @ 2015-08-13 21:32 UTC (permalink / raw)
To: linux-raid; +Cc: Kernel-team, songliubraving, hch, dan.j.williams, neilb
This is the reclaim support for raid5 log. A stripe write will have
following steps:
1. reconstruct the stripe, read data/calculate parity. ops_run_io
prepares to write data/parity to raid disks
2. hijack ops_run_io. stripe data/parity is appending to log disk
3. flush log disk cache
4. ops_run_io run again and do normal operation. stripe data/parity is
written in raid array disks. raid core can return io to upper layer.
5. flush cache of all raid array disks
6. update super block
7. log disk space used by the stripe can be reused
In practice, several stripes consist of an io_unit and we will batch
several io_unit in different steps, but the whole process doesn't
change.
It's possible io return just after data/parity hit log disk, but then
read IO will need read from log disk. For simplicity, IO return happens
at step 4, where read IO can directly read from raid disks.
Currently reclaim run if there is specific reclaimable space (1/4 disk
size or 10G) or we are out of space. Reclaim is just to free log disk
spaces, it doesn't impact data consistency. The size based force reclaim
is to make sure log isn't too big, so recovery doesn't scan log too
much.
Recovery make sure raid disks and log disk have the same data of a
stripe. If crash happens before 4, recovery might/might not recovery
stripe's data/parity depending on if data/parity and its checksum
matches. In either case, this doesn't change the semantics of an IO write.
After step 3, stripe is guaranteed recoverable, because stripe's
data/parity is persistent in log disk. In some cases, log disk content
and raid disks content of a stripe are the same, but recovery will still
copy log disk content to raid disks, this doesn't impact data
consistency. space reuse happens after superblock update and cache
flush.
There is one situation we want to avoid. A broken meta in the middle of
a log causes recovery can't find meta at the head of log. If operations
require meta at the head persistent in log, we must make sure meta
before it persistent in log too. The case is stripe data/parity is in
log and we start write stripe to raid disks (before step 4). stripe
data/parity must be persistent in log before we do the write to raid
disks. The solution is we strictly maintain io_unit list order. In
this case, we only write stripes of an io_unit to raid disks till the
io_unit is the first one whose data/parity is in log.
The io_unit list order is important for other cases too. For example,
some io_unit are reclaimable and others not. They can be mixed in the
list, we shouldn't reuse space of an unreclaimable io_unit.
Signed-off-by: Shaohua Li <shli@fb.com>
---
drivers/md/raid5-cache.c | 257 ++++++++++++++++++++++++++++++++++++++++++++++-
drivers/md/raid5.c | 6 ++
drivers/md/raid5.h | 2 +
3 files changed, 264 insertions(+), 1 deletion(-)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 50328ee..d02a402 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -30,12 +30,20 @@
* */
#define BLOCK_SECTORS (8)
+/*
+ * reclaim runs every 1/4 disk size or 10G reclaimable space. This can prevent
+ * recovery scans a very long log
+ * */
+#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
+#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
+
struct r5l_log {
struct md_rdev *rdev;
u32 uuid_checksum;
sector_t device_size; /* log device size, round to BLOCK_SECTORS */
+ sector_t max_free_space; /* reclaim run if free space is at this size */
sector_t last_checkpoint; /* log tail. where recovery scan starts from */
u64 last_cp_seq; /* log tail sequence */
@@ -53,9 +61,20 @@ struct r5l_log {
struct list_head io_end_ios; /* io_units which have been completely
* written to the log but not yet written
* to the RAID */
+ struct list_head stripe_end_ios; /* io_units which have been
+ * completely written to the RAID *
+ * but have not yet been considered *
+ * for updating super */
struct kmem_cache *io_kc;
+ struct md_thread *reclaim_thread;
+ sector_t reclaim_target; /* number of space that need to be reclaimed.
+ * if it's 0, reclaim spaces used by io_units
+ * which are in IO_UNIT_STRIPE_END state (eg,
+ * reclaim doesn't wait for specific io_unit
+ * switching to IO_UNIT_STRIPE_END state) */
+
struct list_head no_space_stripes; /* pending stripes, log has no space */
spinlock_t no_space_stripes_lock;
};
@@ -164,6 +183,35 @@ static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
}
}
+/*
+ * We don't want too many io_units reside in stripe_end_ios list, which will
+ * waste a lot of memory. So we try to remove some. But we must keep at least 2
+ * io_units. The superblock must point to a valid meta, if it's the last meta,
+ * recovery can scan less
+ * */
+static void r5l_compress_stripe_end_list(struct r5l_log *log)
+{
+ struct r5l_io_unit *first, *last, *io;
+
+ first = list_first_entry(&log->stripe_end_ios,
+ struct r5l_io_unit, log_sibling);
+ last = list_last_entry(&log->stripe_end_ios,
+ struct r5l_io_unit, log_sibling);
+ if (first == last)
+ return;
+ list_del(&first->log_sibling);
+ list_del(&last->log_sibling);
+ while (!list_empty(&log->stripe_end_ios)) {
+ io = list_first_entry(&log->stripe_end_ios,
+ struct r5l_io_unit, log_sibling);
+ list_del(&io->log_sibling);
+ first->log_end = io->log_end;
+ r5l_free_io_unit(log, io);
+ }
+ list_add_tail(&first->log_sibling, &log->stripe_end_ios);
+ list_add_tail(&last->log_sibling, &log->stripe_end_ios);
+}
+
static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
enum r5l_io_unit_state state)
@@ -176,6 +224,22 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
if (state == IO_UNIT_IO_END)
r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
IO_UNIT_IO_END);
+ if (state == IO_UNIT_STRIPE_END) {
+ struct r5l_io_unit *last;
+ sector_t reclaimable_space;
+
+ r5l_move_io_unit_list(&log->io_end_ios, &log->stripe_end_ios,
+ IO_UNIT_STRIPE_END);
+
+ last = list_last_entry(&log->stripe_end_ios,
+ struct r5l_io_unit, log_sibling);
+ reclaimable_space = r5l_ring_distance(log, log->last_checkpoint,
+ last->log_end);
+ if (reclaimable_space >= log->max_free_space)
+ r5l_wake_reclaim(log, 0);
+
+ r5l_compress_stripe_end_list(log);
+ }
wake_up(&io->wait_state);
}
@@ -476,9 +540,175 @@ static void r5l_run_no_space_stripes(struct r5l_log *log)
spin_unlock(&log->no_space_stripes_lock);
}
+void r5l_stripe_write_finished(struct stripe_head *sh)
+{
+ struct r5l_io_unit *io;
+
+ /* Don't support stripe batch */
+ io = sh->log_io;
+ if (!io)
+ return;
+ sh->log_io = NULL;
+
+ if (atomic_dec_and_test(&io->pending_stripe))
+ r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
+}
+
+/*
+ * Starting dispatch IO to raid.
+ * io_unit(meta) consists of a log. There is one situation we want to avoid. A
+ * broken meta in the middle of a log causes recovery can't find meta at the
+ * head of log. If operations require meta at the head persistent in log, we
+ * must make sure meta before it persistent in log too. A case is:
+ *
+ * stripe data/parity is in log, we start write stripe to raid disks. stripe
+ * data/parity must be persistent in log before we do the write to raid disks.
+ *
+ * The solution is we strictly maintain io_unit list order. In this case, we
+ * only write stripes of an io_unit to raid disks till the io_unit is the first
+ * one whose data/parity is in log.
+ * */
+void r5l_flush_stripe_to_raid(struct r5l_log *log)
+{
+ struct r5l_io_unit *io;
+ struct stripe_head *sh;
+ bool run_stripe;
+
+ if (!log)
+ return;
+ spin_lock_irq(&log->io_list_lock);
+ run_stripe = !list_empty(&log->io_end_ios);
+ spin_unlock_irq(&log->io_list_lock);
+
+ if (!run_stripe)
+ return;
+
+ blkdev_issue_flush(log->rdev->bdev, GFP_NOIO, NULL);
+
+ spin_lock_irq(&log->io_list_lock);
+ list_for_each_entry(io, &log->io_end_ios, log_sibling) {
+ if (io->state >= IO_UNIT_STRIPE_START)
+ continue;
+ __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_START);
+
+ while (!list_empty(&io->stripe_list)) {
+ sh = list_first_entry(&io->stripe_list,
+ struct stripe_head, log_list);
+ list_del_init(&sh->log_list);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ raid5_release_stripe(sh);
+ }
+ }
+ spin_unlock_irq(&log->io_list_lock);
+}
+
+static void r5l_kick_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
+{
+ /* the log thread will write the io unit */
+ wait_event(io->wait_state, io->state >= IO_UNIT_IO_END);
+ if (io->state < IO_UNIT_STRIPE_START)
+ r5l_flush_stripe_to_raid(log);
+ wait_event(io->wait_state, io->state >= IO_UNIT_STRIPE_END);
+}
+
+static void r5l_write_super(struct r5l_log *log, sector_t cp);
+static void r5l_do_reclaim(struct r5l_log *log)
+{
+ struct r5l_io_unit *io, *last;
+ LIST_HEAD(list);
+ sector_t free = 0;
+ sector_t reclaim_target = xchg(&log->reclaim_target, 0);
+
+ spin_lock_irq(&log->io_list_lock);
+ /*
+ * move proper io_unit to reclaim list. We should not change the order.
+ * reclaimable/unreclaimable io_unit can be mixed in the list, we
+ * shouldn't reuse space of an unreclaimable io_unit
+ * */
+ while (1) {
+ while (!list_empty(&log->stripe_end_ios)) {
+ io = list_first_entry(&log->stripe_end_ios,
+ struct r5l_io_unit, log_sibling);
+ list_move_tail(&io->log_sibling, &list);
+ free += r5l_ring_distance(log, io->log_start,
+ io->log_end);
+ }
+
+ if (free >= reclaim_target ||
+ (list_empty(&log->running_ios) &&
+ list_empty(&log->io_end_ios) &&
+ list_empty(&log->stripe_end_ios)))
+ break;
+
+ /* Below waiting mostly happens when we shutdown the raid */
+ if (!list_empty(&log->io_end_ios)) {
+ io = list_first_entry(&log->io_end_ios,
+ struct r5l_io_unit, log_sibling);
+ spin_unlock_irq(&log->io_list_lock);
+ /* nobody else can delete the io, we are safe */
+ r5l_kick_io_unit(log, io);
+ spin_lock_irq(&log->io_list_lock);
+ continue;
+ }
+
+ if (!list_empty(&log->running_ios)) {
+ io = list_first_entry(&log->running_ios,
+ struct r5l_io_unit, log_sibling);
+ spin_unlock_irq(&log->io_list_lock);
+ /* nobody else can delete the io, we are safe */
+ r5l_kick_io_unit(log, io);
+ spin_lock_irq(&log->io_list_lock);
+ continue;
+ }
+ }
+ spin_unlock_irq(&log->io_list_lock);
+
+ if (list_empty(&list))
+ return;
+
+ /* super always point to last valid meta */
+ last = list_last_entry(&list, struct r5l_io_unit, log_sibling);
+ /*
+ * write_super will flush cache of each raid disk. We must write super
+ * here, because the log area might be reused soon and we don't want to
+ * confuse recovery
+ * */
+ r5l_write_super(log, last->log_start);
+
+ mutex_lock(&log->io_mutex);
+ log->last_checkpoint = last->log_start;
+ log->last_cp_seq = last->seq;
+ mutex_unlock(&log->io_mutex);
+ r5l_run_no_space_stripes(log);
+
+ while (!list_empty(&list)) {
+ io = list_first_entry(&list, struct r5l_io_unit, log_sibling);
+ list_del(&io->log_sibling);
+ r5l_free_io_unit(log, io);
+ }
+}
+
+static void r5l_reclaim_thread(struct md_thread *thread)
+{
+ struct mddev *mddev = thread->mddev;
+ struct r5conf *conf = mddev->private;
+ struct r5l_log *log = conf->log;
+
+ if (!log)
+ return;
+ r5l_do_reclaim(log);
+}
+
static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
- /* will implement later */
+ sector_t target;
+
+ do {
+ target = log->reclaim_target;
+ if (space < target)
+ return;
+ } while (cmpxchg(&log->reclaim_target, target, space) != target);
+ md_wakeup_thread(log->reclaim_thread);
}
static int r5l_recovery_log(struct r5l_log *log)
@@ -549,6 +779,9 @@ static int r5l_load_log(struct r5l_log *log)
log->last_cp_seq = le64_to_cpu(mb->seq);
log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
+ log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
+ if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
+ log->max_free_space = RECLAIM_MAX_FREE_SPACE;
log->last_checkpoint = cp;
__free_page(page);
@@ -577,11 +810,18 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
spin_lock_init(&log->io_list_lock);
INIT_LIST_HEAD(&log->running_ios);
+ INIT_LIST_HEAD(&log->io_end_ios);
+ INIT_LIST_HEAD(&log->stripe_end_ios);
log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
if (!log->io_kc)
goto io_kc;
+ log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
+ log->rdev->mddev, "reclaim");
+ if (!log->reclaim_thread)
+ goto reclaim_thread;
+
INIT_LIST_HEAD(&log->no_space_stripes);
spin_lock_init(&log->no_space_stripes_lock);
@@ -591,6 +831,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
conf->log = log;
return 0;
error:
+ md_unregister_thread(&log->reclaim_thread);
+reclaim_thread:
kmem_cache_destroy(log->io_kc);
io_kc:
kfree(log);
@@ -599,6 +841,19 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
void r5l_exit_log(struct r5l_log *log)
{
+ /*
+ * at this point all stripes are finished, so io_unit is at least in
+ * STRIPE_END state
+ * */
+ r5l_wake_reclaim(log, -1L);
+ md_unregister_thread(&log->reclaim_thread);
+ r5l_do_reclaim(log);
+ /*
+ * force a super update, r5l_do_reclaim might updated the super.
+ * mddev->thread is already stopped
+ * */
+ md_update_sb(log->rdev->mddev, 1);
+
kmem_cache_destroy(log->io_kc);
kfree(log);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index fb8a811..40799ed 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3106,6 +3106,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
if (bi)
bitmap_end = 1;
+ r5l_stripe_write_finished(sh);
+
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
@@ -3504,6 +3506,8 @@ static void handle_stripe_clean_event(struct r5conf *conf,
WARN_ON(dev->page != dev->orig_page);
}
+ r5l_stripe_write_finished(sh);
+
if (!discard_pending &&
test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
@@ -5879,6 +5883,8 @@ static void raid5d(struct md_thread *thread)
mutex_unlock(&conf->cache_size_mutex);
}
+ r5l_flush_stripe_to_raid(conf->log);
+
async_tx_issue_pending_all();
blk_finish_plug(&plug);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index ff666eb..d098ca9 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -624,4 +624,6 @@ extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
extern void r5l_exit_log(struct r5l_log *log);
extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
extern void r5l_write_stripe_run(struct r5l_log *log);
+extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
+extern void r5l_stripe_write_finished(struct stripe_head *sh);
#endif
--
1.8.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [patch v2 08/11] raid5: log recovery
2015-08-13 21:31 [patch v2 00/11]md: fix raid5 write hole Shaohua Li
` (6 preceding siblings ...)
2015-08-13 21:32 ` [patch v2 07/11] raid5: log reclaim support Shaohua Li
@ 2015-08-13 21:32 ` Shaohua Li
2015-08-13 21:32 ` [patch v2 09/11] raid5: disable batch with log enabled Shaohua Li
` (3 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: Shaohua Li @ 2015-08-13 21:32 UTC (permalink / raw)
To: linux-raid; +Cc: Kernel-team, songliubraving, hch, dan.j.williams, neilb
This is the log recovery support. The process is quite straightforward.
We scan the log and read all valid meta/data/parity into memory. If a
stripe's data/parity checksum is correct, the stripe will be recovered.
Otherwise, it's discarded and we don't scan the log further. The reclaim
process guarantees that a stripe which starts to be flushed to the raid
disks has completed data/parity and a correct checksum. To recover a
stripe, we just copy its data/parity to the corresponding raid disks.
The tricky thing is the superblock update after recovery. We can't let
superblock point to last valid meta block. The log might look like:
| meta 1| meta 2| meta 3|
meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If superblock
points to meta 1, we write a new valid meta 2n. If a crash happens again,
the new recovery will start from meta 1. Since meta 2n is valid, recovery
will think meta 3 is valid, which is wrong. The solution is that we create
a new meta in meta 2 with its seq == meta 1's seq + 10 and let the
superblock point to meta 2. Recovery will then not treat meta 3 as a
valid meta, because its seq is wrong.
Signed-off-by: Shaohua Li <shli@fb.com>
---
drivers/md/raid5-cache.c | 241 ++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 238 insertions(+), 3 deletions(-)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index d02a402..52feb90 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -711,11 +711,246 @@ static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
md_wakeup_thread(log->reclaim_thread);
}
+struct r5l_recovery_ctx {
+ struct page *meta_page; /* current meta */
+ sector_t meta_total_blocks; /* total size of current meta and data */
+ sector_t pos; /* recovery position */
+ u64 seq; /* recovery position seq */
+};
+
+static int r5l_read_meta_block(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct page *page = ctx->meta_page;
+ struct r5l_meta_block *mb;
+ u32 crc, stored_crc;
+
+ if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
+ return -EIO;
+
+ mb = page_address(page);
+ stored_crc = le32_to_cpu(mb->checksum);
+ mb->checksum = 0;
+
+ if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
+ le64_to_cpu(mb->seq) != ctx->seq ||
+ mb->version != R5LOG_VERSION ||
+ le64_to_cpu(mb->position) != ctx->pos)
+ return -EINVAL;
+
+ crc = crc32_le(log->uuid_checksum, (void *)mb, PAGE_SIZE);
+ if (stored_crc != crc)
+ return -EINVAL;
+
+ if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
+ return -EINVAL;
+
+ ctx->meta_total_blocks = BLOCK_SECTORS;
+
+ return 0;
+}
+
+static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx, sector_t stripe_sect,
+ int *offset, sector_t *log_offset)
+{
+ struct r5conf *conf = log->rdev->mddev->private;
+ struct stripe_head *sh;
+ struct r5l_payload_data_parity *payload;
+ int disk_index;
+
+ sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
+ while (1) {
+ payload = page_address(ctx->meta_page) + *offset;
+
+ if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
+ raid5_compute_sector(conf,
+ le64_to_cpu(payload->location), 0,
+ &disk_index, sh);
+
+ sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
+ sh->dev[disk_index].page, READ, false);
+ sh->dev[disk_index].log_checksum =
+ le32_to_cpu(payload->checksum[0]);
+ set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
+ ctx->meta_total_blocks += BLOCK_SECTORS;
+ } else {
+ disk_index = sh->pd_idx;
+ sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
+ sh->dev[disk_index].page, READ, false);
+ sh->dev[disk_index].log_checksum =
+ le32_to_cpu(payload->checksum[0]);
+ set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
+
+ if (sh->qd_idx >= 0) {
+ disk_index = sh->qd_idx;
+ sync_page_io(log->rdev,
+ r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
+ PAGE_SIZE, sh->dev[disk_index].page,
+ READ, false);
+ sh->dev[disk_index].log_checksum =
+ le32_to_cpu(payload->checksum[1]);
+ set_bit(R5_Wantwrite,
+ &sh->dev[disk_index].flags);
+ }
+ ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
+ }
+
+ *log_offset = r5l_ring_add(log, *log_offset,
+ le32_to_cpu(payload->size));
+ *offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+ if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
+ break;
+ }
+
+ for (disk_index = 0; disk_index < sh->disks; disk_index++) {
+ void *addr;
+ u32 checksum;
+
+ if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
+ continue;
+ addr = kmap_atomic(sh->dev[disk_index].page);
+ checksum = crc32_le(log->uuid_checksum, addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ if (checksum != sh->dev[disk_index].log_checksum)
+ goto error;
+ }
+
+ for (disk_index = 0; disk_index < sh->disks; disk_index++) {
+ struct md_rdev *rdev, *rrdev;
+ if (!test_and_clear_bit(R5_Wantwrite,
+ &sh->dev[disk_index].flags))
+ continue;
+
+ /* in case device is broken */
+ rdev = rcu_dereference(conf->disks[disk_index].rdev);
+ if (rdev)
+ sync_page_io(rdev, stripe_sect, PAGE_SIZE,
+ sh->dev[disk_index].page, WRITE, false);
+ rrdev = rcu_dereference(conf->disks[disk_index].replacement);
+ if (rrdev)
+ sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
+ sh->dev[disk_index].page, WRITE, false);
+ }
+ raid5_release_stripe(sh);
+ return 0;
+
+error:
+ for (disk_index = 0; disk_index < sh->disks; disk_index++)
+ sh->dev[disk_index].flags = 0;
+ raid5_release_stripe(sh);
+ return -EINVAL;
+}
+
+static int r5l_recovery_flush_one_meta(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct r5conf *conf = log->rdev->mddev->private;
+ struct r5l_payload_data_parity *payload;
+ struct r5l_meta_block *mb;
+ int offset;
+ sector_t log_offset;
+ sector_t stripe_sector;
+
+ mb = page_address(ctx->meta_page);
+ offset = sizeof(struct r5l_meta_block);
+ log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+
+ while (offset < le32_to_cpu(mb->meta_size)) {
+ int dd;
+
+ payload = (void *)mb + offset;
+ stripe_sector = raid5_compute_sector(conf,
+ le64_to_cpu(payload->location), 0, &dd, NULL);
+ if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
+ &offset, &log_offset))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* copy data/parity from log to raid disks */
+static void r5l_recovery_flush_log(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ while (1) {
+ if (r5l_read_meta_block(log, ctx))
+ return;
+ if (r5l_recovery_flush_one_meta(log, ctx))
+ return;
+ ctx->seq++;
+ ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
+ }
+}
+
+static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
+ u64 seq)
+{
+ struct page *page;
+ struct r5l_meta_block *mb;
+ u32 crc;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return -ENOMEM;
+ mb = page_address(page);
+ mb->magic = cpu_to_le32(R5LOG_MAGIC);
+ mb->version = R5LOG_VERSION;
+ mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
+ mb->seq = cpu_to_le64(seq);
+ mb->position = cpu_to_le64(pos);
+ crc = crc32_le(log->uuid_checksum, (void *)mb, PAGE_SIZE);
+ mb->checksum = cpu_to_le32(crc);
+
+ if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
+ __free_page(page);
+ return -EIO;
+ }
+ __free_page(page);
+ return 0;
+}
+
static int r5l_recovery_log(struct r5l_log *log)
{
- /* fake recovery */
- log->seq = log->last_cp_seq + 1;
- log->log_start = r5l_ring_add(log, log->last_checkpoint, BLOCK_SECTORS);
+ struct r5l_recovery_ctx ctx;
+
+ ctx.pos = log->last_checkpoint;
+ ctx.seq = log->last_cp_seq;
+ ctx.meta_page = alloc_page(GFP_KERNEL);
+ if (!ctx.meta_page)
+ return -ENOMEM;
+
+ r5l_recovery_flush_log(log, &ctx);
+ __free_page(ctx.meta_page);
+
+ /*
+ * we did a recovery. Now ctx.pos points to an invalid meta block. New
+ * log will start here. but we can't let superblock point to last valid
+ * meta block. The log might looks like:
+ * | meta 1| meta 2| meta 3|
+ * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If
+ * superblock points to meta 1, we write a new valid meta 2n. if crash
+ * happens again, new recovery will start from meta 1. Since meta 2n is
+ * valid now, recovery will think meta 3 is valid, which is wrong.
+ * The solution is we create a new meta in meta2 with its seq == meta
+ * 1's seq + 10 and let superblock points to meta2. The same recovery will
+ * not think meta 3 is a valid meta, because its seq doesn't match
+ */
+ if (ctx.seq > log->last_cp_seq + 1) {
+ int ret;
+
+ ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
+ if (ret)
+ return ret;
+ log->seq = ctx.seq + 11;
+ log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
+ r5l_write_super(log, ctx.pos);
+ } else {
+ log->log_start = ctx.pos;
+ log->seq = ctx.seq;
+ }
return 0;
}
--
1.8.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [patch v2 09/11] raid5: disable batch with log enabled
2015-08-13 21:31 [patch v2 00/11]md: fix raid5 write hole Shaohua Li
` (7 preceding siblings ...)
2015-08-13 21:32 ` [patch v2 08/11] raid5: log recovery Shaohua Li
@ 2015-08-13 21:32 ` Shaohua Li
2015-08-13 21:32 ` [patch v2 10/11] raid5: don't allow resize/reshape with cache(log) support Shaohua Li
` (2 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: Shaohua Li @ 2015-08-13 21:32 UTC (permalink / raw)
To: linux-raid; +Cc: Kernel-team, songliubraving, hch, dan.j.williams, neilb
With log enabled, r5l_write_stripe will add the stripe to log. With
batch, several stripes are linked together. The stripes must be in the
same state. But with the log, the log/reclaim unit is a stripe, so we
can't guarantee that several stripes are in the same state. Disabling batch for
log now.
Signed-off-by: Shaohua Li <shli@fb.com>
---
drivers/md/raid5.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 40799ed..80a9a78 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -759,6 +759,9 @@ static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
/* Only freshly new full stripe normal write stripe can be added to a batch list */
static bool stripe_can_batch(struct stripe_head *sh)
{
+ struct r5conf *conf = sh->raid_conf;
+ if (conf->log)
+ return false;
return test_bit(STRIPE_BATCH_READY, &sh->state) &&
!test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
is_full_stripe_write(sh);
--
1.8.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [patch v2 10/11] raid5: don't allow resize/reshape with cache(log) support
2015-08-13 21:31 [patch v2 00/11]md: fix raid5 write hole Shaohua Li
` (8 preceding siblings ...)
2015-08-13 21:32 ` [patch v2 09/11] raid5: disable batch with log enabled Shaohua Li
@ 2015-08-13 21:32 ` Shaohua Li
2015-08-13 21:32 ` [patch v2 11/11] raid5: enable log for raid array with cache disk Shaohua Li
2015-08-17 4:45 ` [patch v2 00/11]md: fix raid5 write hole NeilBrown
11 siblings, 0 replies; 13+ messages in thread
From: Shaohua Li @ 2015-08-13 21:32 UTC (permalink / raw)
To: linux-raid; +Cc: Kernel-team, songliubraving, hch, dan.j.williams, neilb
If cache(log) support is enabled, don't allow resize/reshape in current
stage. In the future, we can flush all data from cache(log) to raid
before resize/reshape and then allow resize/reshape.
Signed-off-by: Shaohua Li <shli@fb.com>
---
drivers/md/raid5.c | 17 +++++++++++++++++
1 file changed, 17 insertions(+)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 80a9a78..343536d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6655,6 +6655,7 @@ static int run(struct mddev *mddev)
int working_disks = 0;
int dirty_parity_disks = 0;
struct md_rdev *rdev;
+ struct md_rdev *journal_dev = NULL;
sector_t reshape_offset = 0;
int i;
long long min_offset_diff = 0;
@@ -6667,6 +6668,9 @@ static int run(struct mddev *mddev)
rdev_for_each(rdev, mddev) {
long long diff;
+
+ if (test_bit(Journal, &rdev->flags))
+ journal_dev = rdev;
if (rdev->raid_disk < 0)
continue;
diff = (rdev->new_data_offset - rdev->data_offset);
@@ -6698,6 +6702,13 @@ static int run(struct mddev *mddev)
int old_disks;
int max_degraded = (mddev->level == 6 ? 2 : 1);
+ if (journal_dev) {
+ printk(KERN_ERR "md/raid:%s: don't support reshape "
+ "with journal - aborting.\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+
if (mddev->new_level != mddev->level) {
printk(KERN_ERR "md/raid:%s: unsupported reshape "
"required - aborting.\n",
@@ -7209,6 +7220,10 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
* worth it.
*/
sector_t newsize;
+ struct r5conf *conf = mddev->private;
+
+ if (conf->log)
+ return -EINVAL;
sectors &= ~((sector_t)mddev->chunk_sectors - 1);
newsize = raid5_size(mddev, sectors, mddev->raid_disks);
if (mddev->external_size &&
@@ -7260,6 +7275,8 @@ static int check_reshape(struct mddev *mddev)
{
struct r5conf *conf = mddev->private;
+ if (conf->log)
+ return -EINVAL;
if (mddev->delta_disks == 0 &&
mddev->new_layout == mddev->layout &&
mddev->new_chunk_sectors == mddev->chunk_sectors)
--
1.8.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [patch v2 11/11] raid5: enable log for raid array with cache disk
2015-08-13 21:31 [patch v2 00/11]md: fix raid5 write hole Shaohua Li
` (9 preceding siblings ...)
2015-08-13 21:32 ` [patch v2 10/11] raid5: don't allow resize/reshape with cache(log) support Shaohua Li
@ 2015-08-13 21:32 ` Shaohua Li
2015-08-17 4:45 ` [patch v2 00/11]md: fix raid5 write hole NeilBrown
11 siblings, 0 replies; 13+ messages in thread
From: Shaohua Li @ 2015-08-13 21:32 UTC (permalink / raw)
To: linux-raid; +Cc: Kernel-team, songliubraving, hch, dan.j.williams, neilb
Now log is safe to enable for raid array with cache disk
Signed-off-by: Shaohua Li <shli@fb.com>
---
drivers/md/raid5.c | 10 ++++++++++
include/uapi/linux/raid/md_p.h | 1 +
2 files changed, 11 insertions(+)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 343536d..28cccfa 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6325,8 +6325,11 @@ static void raid5_free_percpu(struct r5conf *conf)
static void free_conf(struct r5conf *conf)
{
+ if (conf->log)
+ r5l_exit_log(conf->log);
if (conf->shrinker.seeks)
unregister_shrinker(&conf->shrinker);
+
free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
@@ -6990,6 +6993,13 @@ static int run(struct mddev *mddev)
mddev->queue);
}
+ if (journal_dev) {
+ char b[BDEVNAME_SIZE];
+ printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
+ mdname(mddev), bdevname(journal_dev->bdev, b));
+ r5l_init_log(conf, journal_dev);
+ }
+
return 0;
abort:
md_unregister_thread(&mddev->thread);
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index 3553a72..1de9e17 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -320,6 +320,7 @@ struct mdp_superblock_1 {
|MD_FEATURE_RESHAPE_BACKWARDS \
|MD_FEATURE_NEW_OFFSET \
|MD_FEATURE_RECOVERY_BITMAP \
+ |MD_FEATURE_JOURNAL \
)
struct r5l_payload_header {
--
1.8.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [patch v2 00/11]md: fix raid5 write hole
2015-08-13 21:31 [patch v2 00/11]md: fix raid5 write hole Shaohua Li
` (10 preceding siblings ...)
2015-08-13 21:32 ` [patch v2 11/11] raid5: enable log for raid array with cache disk Shaohua Li
@ 2015-08-17 4:45 ` NeilBrown
11 siblings, 0 replies; 13+ messages in thread
From: NeilBrown @ 2015-08-17 4:45 UTC (permalink / raw)
To: Shaohua Li; +Cc: linux-raid, Kernel-team, songliubraving, hch, dan.j.williams
On Thu, 13 Aug 2015 14:31:53 -0700 Shaohua Li <shli@fb.com> wrote:
> Hi Neil,
>
> This is the updated patch for the raid5 write hole issue. I thought I addressed
> most of issues from you and Christoph. Please let me know if anything is
> missed. Something not done yet:
Hi Shaohua,
thanks for your persistence and patience. It does look like you have
addressed everything, though I'm a bit foggy today (recovering from a
cold) so I cannot give it the attention it deserves right now.
Hopefully I'll give you more feedback later in the week. I'll be
offline after that until late September so I'll try really hard to
raise anything important before then.
I'll probably queue these in my "devel" branch and aim for Linux 4.4.
That will give me (and others??) time to test and experiment some more.
I'd be quite happy to see patches on top of these for the write-ahead
caching when I get back in late September, if you happen to have them
ready by then.
>
> - Still use NOFAIL allocation. I don't think 2-element mempool is ok. An
> io_unit will have several bio (> 2), 2 element bioset isn't ok. We can
> dispatch all bio of the io_unit if bio allocation fails, but this will increase
> complexity. I'd prefer using NOFAIL allocation now for simplicity and fix it
> later if necessary
You might need to get a patch accepted which removes the line:
* __GFP_NOFAIL is not to be used in new code.
from mm/page_alloc.c :-(
... though from looking at the code, I think the flag is (currently)
ignored for single-page allocations, they never fail (if they are
allowed to wait).
It's all very confusing.
It can stay for now. It serves as useful documentation at least.
> - Add flag for reshape handling in disk format, but don't support it yet
That looks good, thanks.
Thanks,
NeilBrown
>
> Thanks,
> Shaohua
>
>
> Shaohua Li (9):
> md: override md superblock recovery_offset for journal device
> raid5: export some functions
> raid5: add a new state for stripe log handling
> raid5: add basic stripe log
> raid5: log reclaim support
> raid5: log recovery
> raid5: disable batch with log enabled
> raid5: don't allow resize/reshape with cache(log) support
> raid5: enable log for raid array with cache disk
>
> Song Liu (2):
> MD: replace special disk roles with macros
> MD: add a new disk role to present write journal device
>
> drivers/md/Makefile | 2 +-
> drivers/md/md.c | 44 +-
> drivers/md/md.h | 13 +-
> drivers/md/raid5-cache.c | 1094 ++++++++++++++++++++++++++++++++++++++++
> drivers/md/raid5.c | 137 +++--
> drivers/md/raid5.h | 20 +
> include/uapi/linux/raid/md_p.h | 70 ++-
> 7 files changed, 1320 insertions(+), 60 deletions(-)
> create mode 100644 drivers/md/raid5-cache.c
>
^ permalink raw reply [flat|nested] 13+ messages in thread