From: NeilBrown <neilb@suse.de>
To: Andrew Morton <akpm@osdl.org>
Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH 011 of 11] md: Split reshape portion of raid5 sync_request into a separate function.
Date: Mon, 1 May 2006 15:31:05 +1000 [thread overview]
Message-ID: <1060501053105.23033@suse.de> (raw)
In-Reply-To: 20060501152229.18367.patches@notabene
... as raid5 sync_request is WAY too big.
Signed-off-by: Neil Brown <neilb@suse.de>
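
For quick review, the only change to sync_request itself is that the whole
reshape branch collapses into a single call to the new helper; a simplified
sketch of the resulting control flow (not the literal post-patch code, the
resync/recovery body is elided):

static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
			     int *skipped, int go_faster)
{
	/* ... end-of-sync and restart bookkeeping, unchanged ... */

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return reshape_request(mddev, sector_nr, skipped);

	/* ... normal resync/recovery path, unchanged ... */
}

reshape_request still returns one chunk worth of sectors per call
(conf->chunk_size>>9), exactly as the inlined code did.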
### Diffstat output
./drivers/md/raid5.c | 244 ++++++++++++++++++++++++++-------------------------
1 file changed, 127 insertions(+), 117 deletions(-)
diff ./drivers/md/raid5.c~current~ ./drivers/md/raid5.c
--- ./drivers/md/raid5.c~current~ 2006-05-01 15:12:34.000000000 +1000
+++ ./drivers/md/raid5.c 2006-05-01 15:13:41.000000000 +1000
@@ -2696,13 +2696,136 @@ static int make_request(request_queue_t
return 0;
}
-/* FIXME go_faster isn't used */
-static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
{
+ /* reshaping is quite different to recovery/resync so it is
+ * handled quite separately ... here.
+ *
+ * On each call to sync_request, we gather one chunk worth of
+ * destination stripes and flag them as expanding.
+ * Then we find all the source stripes and request reads.
+ * As the reads complete, handle_stripe will copy the data
+ * into the destination stripe and release that stripe.
+ */
raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
struct stripe_head *sh;
int pd_idx;
sector_t first_sector, last_sector;
+ int raid_disks;
+ int data_disks;
+ int i;
+ int dd_idx;
+ sector_t writepos, safepos, gap;
+
+ if (sector_nr == 0 &&
+ conf->expand_progress != 0) {
+ /* restarting in the middle, skip the initial sectors */
+ sector_nr = conf->expand_progress;
+ sector_div(sector_nr, conf->raid_disks-1);
+ *skipped = 1;
+ return sector_nr;
+ }
+
+ /* we update the metadata when there is more than 3Meg
+ * in the block range (that is rather arbitrary, should
+ * probably be time based) or when the data about to be
+ * copied would over-write the source of the data at
+ * the front of the range.
+ * i.e. one new_stripe forward from expand_progress new_maps
+ * to after where expand_lo old_maps to
+ */
+ writepos = conf->expand_progress +
+ conf->chunk_size/512*(conf->raid_disks-1);
+ sector_div(writepos, conf->raid_disks-1);
+ safepos = conf->expand_lo;
+ sector_div(safepos, conf->previous_raid_disks-1);
+ gap = conf->expand_progress - conf->expand_lo;
+
+ if (writepos >= safepos ||
+ gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
+ /* Cannot proceed until we've updated the superblock... */
+ wait_event(conf->wait_for_overlap,
+ atomic_read(&conf->reshape_stripes)==0);
+ mddev->reshape_position = conf->expand_progress;
+ mddev->sb_dirty = 1;
+ md_wakeup_thread(mddev->thread);
+ wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
+ kthread_should_stop());
+ spin_lock_irq(&conf->device_lock);
+ conf->expand_lo = mddev->reshape_position;
+ spin_unlock_irq(&conf->device_lock);
+ wake_up(&conf->wait_for_overlap);
+ }
+
+ for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
+ int j;
+ int skipped = 0;
+ pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
+ sh = get_active_stripe(conf, sector_nr+i,
+ conf->raid_disks, pd_idx, 0);
+ set_bit(STRIPE_EXPANDING, &sh->state);
+ atomic_inc(&conf->reshape_stripes);
+ /* If any of this stripe is beyond the end of the old
+ * array, then we need to zero those blocks
+ */
+ for (j=sh->disks; j--;) {
+ sector_t s;
+ if (j == sh->pd_idx)
+ continue;
+ s = compute_blocknr(sh, j);
+ if (s < (mddev->array_size<<1)) {
+ skipped = 1;
+ continue;
+ }
+ memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
+ set_bit(R5_Expanded, &sh->dev[j].flags);
+ set_bit(R5_UPTODATE, &sh->dev[j].flags);
+ }
+ if (!skipped) {
+ set_bit(STRIPE_EXPAND_READY, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+ release_stripe(sh);
+ }
+ spin_lock_irq(&conf->device_lock);
+ conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
+ spin_unlock_irq(&conf->device_lock);
+ /* Ok, those stripe are ready. We can start scheduling
+ * reads on the source stripes.
+ * The source stripes are determined by mapping the first and last
+ * block on the destination stripes.
+ */
+ raid_disks = conf->previous_raid_disks;
+ data_disks = raid_disks - 1;
+ first_sector =
+ raid5_compute_sector(sector_nr*(conf->raid_disks-1),
+ raid_disks, data_disks,
+ &dd_idx, &pd_idx, conf);
+ last_sector =
+ raid5_compute_sector((sector_nr+conf->chunk_size/512)
+ *(conf->raid_disks-1) -1,
+ raid_disks, data_disks,
+ &dd_idx, &pd_idx, conf);
+ if (last_sector >= (mddev->size<<1))
+ last_sector = (mddev->size<<1)-1;
+ while (first_sector <= last_sector) {
+ pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
+ sh = get_active_stripe(conf, first_sector,
+ conf->previous_raid_disks, pd_idx, 0);
+ set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+ first_sector += STRIPE_SECTORS;
+ }
+ return conf->chunk_size>>9;
+}
+
+/* FIXME go_faster isn't used */
+static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
+{
+ raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ struct stripe_head *sh;
+ int pd_idx;
int raid_disks = conf->raid_disks;
int data_disks = raid_disks - conf->max_degraded;
sector_t max_sector = mddev->size << 1;
@@ -2728,122 +2851,9 @@ static sector_t sync_request(mddev_t *md
return 0;
}
- if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
- /* reshaping is quite different to recovery/resync so it is
- * handled quite separately ... here.
- *
- * On each call to sync_request, we gather one chunk worth of
- * destination stripes and flag them as expanding.
- * Then we find all the source stripes and request reads.
- * As the reads complete, handle_stripe will copy the data
- * into the destination stripe and release that stripe.
- */
- int i;
- int dd_idx;
- sector_t writepos, safepos, gap;
-
- if (sector_nr == 0 &&
- conf->expand_progress != 0) {
- /* restarting in the middle, skip the initial sectors */
- sector_nr = conf->expand_progress;
- sector_div(sector_nr, conf->raid_disks-1);
- *skipped = 1;
- return sector_nr;
- }
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ return reshape_request(mddev, sector_nr, skipped);
- /* we update the metadata when there is more than 3Meg
- * in the block range (that is rather arbitrary, should
- * probably be time based) or when the data about to be
- * copied would over-write the source of the data at
- * the front of the range.
- * i.e. one new_stripe forward from expand_progress new_maps
- * to after where expand_lo old_maps to
- */
- writepos = conf->expand_progress +
- conf->chunk_size/512*(conf->raid_disks-1);
- sector_div(writepos, conf->raid_disks-1);
- safepos = conf->expand_lo;
- sector_div(safepos, conf->previous_raid_disks-1);
- gap = conf->expand_progress - conf->expand_lo;
-
- if (writepos >= safepos ||
- gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
- /* Cannot proceed until we've updated the superblock... */
- wait_event(conf->wait_for_overlap,
- atomic_read(&conf->reshape_stripes)==0);
- mddev->reshape_position = conf->expand_progress;
- mddev->sb_dirty = 1;
- md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
- kthread_should_stop());
- spin_lock_irq(&conf->device_lock);
- conf->expand_lo = mddev->reshape_position;
- spin_unlock_irq(&conf->device_lock);
- wake_up(&conf->wait_for_overlap);
- }
-
- for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
- int j;
- int skipped = 0;
- pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
- sh = get_active_stripe(conf, sector_nr+i,
- conf->raid_disks, pd_idx, 0);
- set_bit(STRIPE_EXPANDING, &sh->state);
- atomic_inc(&conf->reshape_stripes);
- /* If any of this stripe is beyond the end of the old
- * array, then we need to zero those blocks
- */
- for (j=sh->disks; j--;) {
- sector_t s;
- if (j == sh->pd_idx)
- continue;
- s = compute_blocknr(sh, j);
- if (s < (mddev->array_size<<1)) {
- skipped = 1;
- continue;
- }
- memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
- set_bit(R5_Expanded, &sh->dev[j].flags);
- set_bit(R5_UPTODATE, &sh->dev[j].flags);
- }
- if (!skipped) {
- set_bit(STRIPE_EXPAND_READY, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- release_stripe(sh);
- }
- spin_lock_irq(&conf->device_lock);
- conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
- spin_unlock_irq(&conf->device_lock);
- /* Ok, those stripe are ready. We can start scheduling
- * reads on the source stripes.
- * The source stripes are determined by mapping the first and last
- * block on the destination stripes.
- */
- raid_disks = conf->previous_raid_disks;
- data_disks = raid_disks - 1;
- first_sector =
- raid5_compute_sector(sector_nr*(conf->raid_disks-1),
- raid_disks, data_disks,
- &dd_idx, &pd_idx, conf);
- last_sector =
- raid5_compute_sector((sector_nr+conf->chunk_size/512)
- *(conf->raid_disks-1) -1,
- raid_disks, data_disks,
- &dd_idx, &pd_idx, conf);
- if (last_sector >= (mddev->size<<1))
- last_sector = (mddev->size<<1)-1;
- while (first_sector <= last_sector) {
- pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
- sh = get_active_stripe(conf, first_sector,
- conf->previous_raid_disks, pd_idx, 0);
- set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
- first_sector += STRIPE_SECTORS;
- }
- return conf->chunk_size>>9;
- }
/* if there is too many failed drives and we are trying
* to resync, then assert that we are finished, because there is
* nothing we can do.
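
A note on the arithmetic that moves with the code: conf->expand_progress and
conf->expand_lo are array-wide data offsets, while sync_request works in
per-disk sectors, so the helper divides by the number of data disks before
comparing positions. The superblock-update trigger can be read roughly as
follows (illustrative sketch only: the helper name is invented, and the real
code above has to use sector_div() because sector_t may be 64 bits on a
32-bit host):

static int need_metadata_update(raid5_conf_t *conf)
{
	int new_data_disks = conf->raid_disks - 1;
	int old_data_disks = conf->previous_raid_disks - 1;
	sector_t writepos, safepos, gap;

	/* One chunk beyond the current write front, in per-(new-)disk sectors. */
	writepos = (conf->expand_progress +
		    (conf->chunk_size / 512) * new_data_disks) / new_data_disks;

	/* Last reshape position recorded in the superblock, in per-(old-)disk
	 * sectors. */
	safepos = conf->expand_lo / old_data_disks;

	/* Array-wide distance copied since that superblock update. */
	gap = conf->expand_progress - conf->expand_lo;

	return writepos >= safepos ||
	       gap > (sector_t)new_data_disks * 3000*2 /* ~3MB per data disk */;
}

The 3000*2 sectors per data disk is the "rather arbitrary" ~3MB threshold the
comment mentions; the writepos >= safepos test forces a superblock update
before the copy would start overwriting source blocks that expand_lo still
maps to the old layout.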