* [PATCH v2 1/3] md/r5cache: flush data only stripes in r5l_recovery_log()
@ 2017-01-24 1:12 Song Liu
2017-01-24 1:12 ` [PATCH v2 2/3] md/r5cache: shift complex rmw from read path to write path Song Liu
2017-01-24 1:12 ` [PATCH v2 3/3] md/r5cache: disable write back for degraded array Song Liu
0 siblings, 2 replies; 4+ messages in thread
From: Song Liu @ 2017-01-24 1:12 UTC (permalink / raw)
To: linux-raid
Cc: neilb, shli, kernel-team, dan.j.williams, hch, liuzhengyuan,
liuyun01, Song Liu, Jes.Sorensen
For safer operation, all arrays start in write-through mode.
However, if recovery found data-only stripes before the shutdown
(from previous write-back mode), it is not safe to run the array
in write-through mode. To solve this problem, we flush all data-only
stripes in r5l_recovery_log(). This logic is implemented in
r5c_recovery_flush_data_only_stripes():
1. enable write back cache
2. flush all stripes
3. wake up conf->mddev->thread
4. wait for all stripes get flushed (reuse wait_for_quiescent)
5. disable write back cache
The wait in step 4 will be woken up in release_inactive_stripe_list()
when conf->active_stripes reaches 0.
It is safe to wake up mddev->thread here because all the resources
required by the thread have been initialized.
Signed-off-by: Song Liu <songliubraving@fb.com>
---
drivers/md/md.c | 5 +++++
drivers/md/raid5-cache.c | 56 ++++++++++++++++++++++++++++++++++--------------
2 files changed, 45 insertions(+), 16 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0abb147..85ac984 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5333,6 +5333,11 @@ int md_run(struct mddev *mddev)
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
+ /*
+ * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
+ * up mddev->thread. It is important to initialize critical
+ * resources for mddev->thread BEFORE calling pers->run().
+ */
err = pers->run(mddev);
if (err)
pr_warn("md: pers->run() failed ...\n");
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 3da5e2a..00d2838 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -2102,7 +2102,7 @@ static int
r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
struct r5l_recovery_ctx *ctx)
{
- struct stripe_head *sh, *next;
+ struct stripe_head *sh;
struct mddev *mddev = log->rdev->mddev;
struct page *page;
sector_t next_checkpoint = MaxSector;
@@ -2116,7 +2116,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
WARN_ON(list_empty(&ctx->cached_list));
- list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+ list_for_each_entry(sh, &ctx->cached_list, lru) {
struct r5l_meta_block *mb;
int i;
int offset;
@@ -2166,14 +2166,39 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
ctx->pos = write_pos;
ctx->seq += 1;
next_checkpoint = sh->log_start;
- list_del_init(&sh->lru);
- raid5_release_stripe(sh);
}
log->next_checkpoint = next_checkpoint;
__free_page(page);
return 0;
}
+static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ struct stripe_head *sh, *next;
+
+ if (ctx->data_only_stripes == 0)
+ return;
+
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
+
+ list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+ r5c_make_stripe_write_out(sh);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
+ }
+
+ md_wakeup_thread(conf->mddev->thread);
+ /* reuse conf->wait_for_quiescent in recovery */
+ wait_event(conf->wait_for_quiescent,
+ atomic_read(&conf->active_stripes) == 0);
+
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+}
+
static int r5l_recovery_log(struct r5l_log *log)
{
struct mddev *mddev = log->rdev->mddev;
@@ -2200,32 +2225,31 @@ static int r5l_recovery_log(struct r5l_log *log)
pos = ctx.pos;
ctx.seq += 10000;
- if (ctx.data_only_stripes == 0) {
- log->next_checkpoint = ctx.pos;
- r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
- ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
- }
if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
pr_debug("md/raid:%s: starting from clean shutdown\n",
mdname(mddev));
- else {
+ else
pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
mdname(mddev), ctx.data_only_stripes,
ctx.data_parity_stripes);
- if (ctx.data_only_stripes > 0)
- if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
- pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
- mdname(mddev));
- return -EIO;
- }
+ if (ctx.data_only_stripes == 0) {
+ log->next_checkpoint = ctx.pos;
+ r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
+ ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
+ } else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
+ pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
+ mdname(mddev));
+ return -EIO;
}
log->log_start = ctx.pos;
log->seq = ctx.seq;
log->last_checkpoint = pos;
r5l_write_super(log, pos);
+
+ r5c_recovery_flush_data_only_stripes(log, &ctx);
return 0;
}
--
2.9.3
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH v2 2/3] md/r5cache: shift complex rmw from read path to write path
2017-01-24 1:12 [PATCH v2 1/3] md/r5cache: flush data only stripes in r5l_recovery_log() Song Liu
@ 2017-01-24 1:12 ` Song Liu
2017-01-24 1:12 ` [PATCH v2 3/3] md/r5cache: disable write back for degraded array Song Liu
1 sibling, 0 replies; 4+ messages in thread
From: Song Liu @ 2017-01-24 1:12 UTC (permalink / raw)
To: linux-raid
Cc: neilb, shli, kernel-team, dan.j.williams, hch, liuzhengyuan,
liuyun01, Song Liu, Jes.Sorensen
Write back cache requires a complex RMW mechanism, where old data is
read into dev->orig_page for prexor, and then xor is done with
dev->page. This logic is already implemented in the write path.
However, the current read path is not aware of this requirement. When
the array is optimal, the RMW is not required, as the data are
read from raid disks. However, when the target stripe is degraded,
complex RMW is required to generate the right data.
To keep read path as clean as possible, we handle read path by
flushing degraded, in-journal stripes before processing reads to
missing dev.
Specifically, when there are read requests to a degraded stripe
with data in journal, handle_stripe_fill() calls
r5c_make_stripe_write_out() and exits. Then handle_stripe_dirtying()
will do the complex RMW and flush the stripe to RAID disks. After
that, read requests are handled.
There is one more corner case when there is a non-overwrite bio for
the missing (or out of sync) dev. handle_stripe_dirtying() will not
be able to process the non-overwrite bios without constructing the
data in handle_stripe_fill(). This is fixed by delaying non-overwrite
bios in handle_stripe_dirtying(). So handle_stripe_fill() works on
these bios after the stripe is flushed to raid disks.
Signed-off-by: Song Liu <songliubraving@fb.com>
---
drivers/md/raid5.c | 48 ++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 44 insertions(+), 4 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f060ad6..ad8f24c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2934,6 +2934,30 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
return r_sector;
}
+/*
+ * There are cases where we want handle_stripe_dirtying() and
+ * schedule_reconstruction() to delay towrite to some dev of a stripe.
+ *
+ * This function checks whether we want to delay the towrite. Specifically,
+ * we delay the towrite when:
+ *
+ * 1. degraded stripe has a non-overwrite to the missing dev, AND this
+ * stripe has data in journal (for other devices).
+ *
+ * In this case, when reading data for the non-overwrite dev, it is
+ * necessary to handle complex rmw of write back cache (prexor with
+ * orig_page, and xor with page). To keep read path simple, we would
+ * like to flush data in journal to RAID disks first, so complex rmw
+ * is handled in the write path (handle_stripe_dirtying).
+ *
+ */
+static inline bool delay_towrite(struct r5dev *dev,
+ struct stripe_head_state *s)
+{
+ return !test_bit(R5_OVERWRITE, &dev->flags) &&
+ !test_bit(R5_Insync, &dev->flags) && s->injournal;
+}
+
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int rcw, int expand)
@@ -2954,7 +2978,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (dev->towrite) {
+ if (dev->towrite && !delay_towrite(dev, s)) {
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantdrain, &dev->flags);
if (!expand)
@@ -3531,10 +3555,25 @@ static void handle_stripe_fill(struct stripe_head *sh,
* midst of changing due to a write
*/
if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
- !sh->reconstruct_state)
+ !sh->reconstruct_state) {
+
+ /* for degraded stripe with data in journal, do not handle
+ * read requests yet, instead, flush the stripe to raid
+ * disks first, this avoids handling complex rmw of write
+ * back cache (prexor with orig_page, and then xor with
+ * page) in the read path
+ */
+ if (s->injournal && s->failed) {
+ if (test_bit(STRIPE_R5C_CACHING, &sh->state))
+ r5c_make_stripe_write_out(sh);
+ goto out;
+ }
+
for (i = disks; i--; )
if (fetch_block(sh, s, i, disks))
break;
+ }
+out:
set_bit(STRIPE_HANDLE, &sh->state);
}
@@ -3690,7 +3729,8 @@ static int handle_stripe_dirtying(struct r5conf *conf,
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx ||
+ if (((dev->towrite && !delay_towrite(dev, s)) ||
+ i == sh->pd_idx || i == sh->qd_idx ||
test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(uptodate_for_rmw(dev) ||
@@ -3754,7 +3794,7 @@ static int handle_stripe_dirtying(struct r5conf *conf,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite ||
+ if (((dev->towrite && !delay_towrite(dev, s)) ||
i == sh->pd_idx || i == sh->qd_idx ||
test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
--
2.9.3
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH v2 3/3] md/r5cache: disable write back for degraded array
2017-01-24 1:12 [PATCH v2 1/3] md/r5cache: flush data only stripes in r5l_recovery_log() Song Liu
2017-01-24 1:12 ` [PATCH v2 2/3] md/r5cache: shift complex rmw from read path to write path Song Liu
@ 2017-01-24 1:12 ` Song Liu
2017-01-24 17:56 ` Shaohua Li
1 sibling, 1 reply; 4+ messages in thread
From: Song Liu @ 2017-01-24 1:12 UTC (permalink / raw)
To: linux-raid
Cc: neilb, shli, kernel-team, dan.j.williams, hch, liuzhengyuan,
liuyun01, Song Liu, Jes.Sorensen
write-back cache in degraded mode introduces corner cases to the array.
Although we try to cover all these corner cases, it is safer to just
disable write-back cache when the array is in degraded mode.
In this patch, we disable writeback cache for degraded mode:
1. On device failure, if the array enters degraded mode, raid5_error()
will submit async job r5c_disable_writeback_async to disable
writeback;
2. In r5c_journal_mode_store(), it is invalid to enable writeback in
degraded mode;
3. In r5c_try_caching_write(), stripes with s->failed>0 will be handled
in write-through mode.
Signed-off-by: Song Liu <songliubraving@fb.com>
---
drivers/md/raid5-cache.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
drivers/md/raid5.c | 3 ++-
drivers/md/raid5.h | 2 ++
3 files changed, 48 insertions(+), 1 deletion(-)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 00d2838..55f1a37 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -164,6 +164,9 @@ struct r5l_log {
/* to submit async io_units, to fulfill ordering of flush */
struct work_struct deferred_io_work;
> + /* to disable write back in degraded mode */
+ struct work_struct disable_writeback_work;
+
> /* for chunk_aligned_read in writeback mode, details below */
spinlock_t tree_lock;
struct radix_tree_root big_stripe_tree;
@@ -653,6 +656,20 @@ static void r5l_submit_io_async(struct work_struct *work)
r5l_do_submit_io(log, io);
}
+static void r5c_disable_writeback_async(struct work_struct *work)
+{
+ struct r5l_log *log = container_of(work, struct r5l_log,
+ disable_writeback_work);
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+
+ pr_crit("md/raid:%s: Disabling writeback cache for degraded array.\n",
+ mdname(mddev));
+ mddev_suspend(mddev);
+ conf->log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+ mddev_resume(mddev);
+}
+
static void r5l_submit_current_io(struct r5l_log *log)
{
struct r5l_io_unit *io = log->current_io;
@@ -2311,6 +2328,9 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
val > R5C_JOURNAL_MODE_WRITE_BACK)
return -EINVAL;
+ if (calc_degraded(conf) > 0 && val == R5C_JOURNAL_MODE_WRITE_BACK)
+ return -EINVAL;
+
mddev_suspend(mddev);
conf->log->r5c_journal_mode = val;
mddev_resume(mddev);
@@ -2369,6 +2389,16 @@ int r5c_try_caching_write(struct r5conf *conf,
set_bit(STRIPE_R5C_CACHING, &sh->state);
}
+ /*
+ * When run in degraded mode, array is set to write-through mode.
+ * This check helps drain pending write safely in the transition to
+ * write-through mode.
+ */
+ if (s->failed) {
+ r5c_make_stripe_write_out(sh);
+ return -EAGAIN;
+ }
+
for (i = disks; i--; ) {
dev = &sh->dev[i];
/* if non-overwrite, use writing-out phase */
@@ -2713,6 +2743,19 @@ static int r5l_load_log(struct r5l_log *log)
return ret;
}
+void r5c_update_on_rdev_error(struct mddev *mddev)
+{
+ struct r5conf *conf = mddev->private;
+ struct r5l_log *log = conf->log;
+
+ if (!log)
+ return;
+
+ if (calc_degraded(conf) > 0 &&
+ conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
+ schedule_work(&log->disable_writeback_work);
+}
+
int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -2788,6 +2831,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
spin_lock_init(&log->no_space_stripes_lock);
INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
+ INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
INIT_LIST_HEAD(&log->stripe_in_journal_list);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ad8f24c..f8223e5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -556,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
* of the two sections, and some non-in_sync devices may
* be insync in the section most affected by failed devices.
*/
-static int calc_degraded(struct r5conf *conf)
+int calc_degraded(struct r5conf *conf)
{
int degraded, degraded2;
int i;
@@ -2606,6 +2606,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
bdevname(rdev->bdev, b),
mdname(mddev),
conf->raid_disks - mddev->degraded);
+ r5c_update_on_rdev_error(mddev);
}
/*
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 8ae498c..36f28d1 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -762,6 +762,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
int previous, int noblock, int noquiesce);
+extern int calc_degraded(struct r5conf *conf);
extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
extern void r5l_exit_log(struct r5l_log *log);
extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
@@ -791,4 +792,5 @@ extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
extern void r5c_check_cached_full_stripe(struct r5conf *conf);
extern struct md_sysfs_entry r5c_journal_mode;
extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
+extern void r5c_update_on_rdev_error(struct mddev *mddev);
#endif
--
2.9.3
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH v2 3/3] md/r5cache: disable write back for degraded array
2017-01-24 1:12 ` [PATCH v2 3/3] md/r5cache: disable write back for degraded array Song Liu
@ 2017-01-24 17:56 ` Shaohua Li
0 siblings, 0 replies; 4+ messages in thread
From: Shaohua Li @ 2017-01-24 17:56 UTC (permalink / raw)
To: Song Liu
Cc: linux-raid, neilb, shli, kernel-team, dan.j.williams, hch,
liuzhengyuan, liuyun01, Jes.Sorensen
On Mon, Jan 23, 2017 at 05:12:59PM -0800, Song Liu wrote:
> write-back cache in degraded mode introduces corner cases to the array.
> Although we try to cover all these corner cases, it is safer to just
> disable write-back cache when the array is in degraded mode.
>
> In this patch, we disable writeback cache for degraded mode:
> 1. On device failure, if the array enters degraded mode, raid5_error()
> will submit async job r5c_disable_writeback_async to disable
> writeback;
> 2. In r5c_journal_mode_store(), it is invalid to enable writeback in
> degraded mode;
> 3. In r5c_try_caching_write(), stripes with s->failed>0 will be handled
> in write-through mode.
Applied the first 2, have some comments about this one, please see below
> Signed-off-by: Song Liu <songliubraving@fb.com>
> ---
> drivers/md/raid5-cache.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
> drivers/md/raid5.c | 3 ++-
> drivers/md/raid5.h | 2 ++
> 3 files changed, 48 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
> index 00d2838..55f1a37 100644
> --- a/drivers/md/raid5-cache.c
> +++ b/drivers/md/raid5-cache.c
> @@ -164,6 +164,9 @@ struct r5l_log {
> /* to submit async io_units, to fulfill ordering of flush */
> struct work_struct deferred_io_work;
>
> + /* to disable write back during in degraded mode */
> + struct work_struct disable_writeback_work;
> +
> /* to for chunk_aligned_read in writeback mode, details below */
> spinlock_t tree_lock;
> struct radix_tree_root big_stripe_tree;
> @@ -653,6 +656,20 @@ static void r5l_submit_io_async(struct work_struct *work)
> r5l_do_submit_io(log, io);
> }
>
> +static void r5c_disable_writeback_async(struct work_struct *work)
> +{
> + struct r5l_log *log = container_of(work, struct r5l_log,
> + disable_writeback_work);
> + struct mddev *mddev = log->rdev->mddev;
> + struct r5conf *conf = mddev->private;
> +
> + pr_crit("md/raid:%s: Disabling writeback cache for degraded array.\n",
> + mdname(mddev));
does this need to be pr_crit? This isn't an error. So I think pr_info is more
appropriate.
> + mddev_suspend(mddev);
> + conf->log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
> + mddev_resume(mddev);
> +}
> +
> static void r5l_submit_current_io(struct r5l_log *log)
> {
> struct r5l_io_unit *io = log->current_io;
> @@ -2311,6 +2328,9 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
> val > R5C_JOURNAL_MODE_WRITE_BACK)
> return -EINVAL;
>
> + if (calc_degraded(conf) > 0 && val == R5C_JOURNAL_MODE_WRITE_BACK)
> + return -EINVAL;
> +
> mddev_suspend(mddev);
> conf->log->r5c_journal_mode = val;
> mddev_resume(mddev);
> @@ -2369,6 +2389,16 @@ int r5c_try_caching_write(struct r5conf *conf,
> set_bit(STRIPE_R5C_CACHING, &sh->state);
> }
>
> + /*
> + * When run in degraded mode, array is set to write-through mode.
> + * This check helps drain pending write safely in the transition to
> + * write-through mode.
> + */
> + if (s->failed) {
> + r5c_make_stripe_write_out(sh);
> + return -EAGAIN;
> + }
> +
> for (i = disks; i--; ) {
> dev = &sh->dev[i];
> /* if non-overwrite, use writing-out phase */
> @@ -2713,6 +2743,19 @@ static int r5l_load_log(struct r5l_log *log)
> return ret;
> }
>
> +void r5c_update_on_rdev_error(struct mddev *mddev)
> +{
> + struct r5conf *conf = mddev->private;
> + struct r5l_log *log = conf->log;
> +
> + if (!log)
> + return;
> +
> + if (calc_degraded(conf) > 0 &&
> + conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
> + schedule_work(&log->disable_writeback_work);
> +}
> +
> int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
> {
> struct request_queue *q = bdev_get_queue(rdev->bdev);
> @@ -2788,6 +2831,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
> spin_lock_init(&log->no_space_stripes_lock);
>
> INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
> + INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
In teardown, we need to make sure the work is finished. so please flush the
work at that time.
> log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
> INIT_LIST_HEAD(&log->stripe_in_journal_list);
> diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
> index ad8f24c..f8223e5 100644
> --- a/drivers/md/raid5.c
> +++ b/drivers/md/raid5.c
> @@ -556,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
> * of the two sections, and some non-in_sync devices may
> * be insync in the section most affected by failed devices.
> */
> -static int calc_degraded(struct r5conf *conf)
> +int calc_degraded(struct r5conf *conf)
Since this one is exported to other file, let's rename it to raid5_calc_degraded
Thanks,
Shaohua
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2017-01-24 17:56 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2017-01-24 1:12 [PATCH v2 1/3] md/r5cache: flush data only stripes in r5l_recovery_log() Song Liu
2017-01-24 1:12 ` [PATCH v2 2/3] md/r5cache: shift complex rmw from read path to write path Song Liu
2017-01-24 1:12 ` [PATCH v2 3/3] md/r5cache: disable write back for degraded array Song Liu
2017-01-24 17:56 ` Shaohua Li
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox