linux-raid.vger.kernel.org archive mirror
From: NeilBrown <neilb@suse.de>
To: linux-raid@vger.kernel.org
Subject: [md PATCH 09/16] md/raid1: tidy up new functions: process_checks and fix_sync_read_error.
Date: Wed, 11 May 2011 16:30:31 +1000	[thread overview]
Message-ID: <20110511063031.21263.50682.stgit@notabene.brown> (raw)
In-Reply-To: <20110511062743.21263.72802.stgit@notabene.brown>

These changes are mostly cosmetic:

1/ Change mddev->raid_disks to conf->raid_disks because the latter is
   technically safer, though in current practice it doesn't matter in
   this particular context.
2/ Rearrange two for / if loops to have an early 'continue' so the
   body of the 'if' doesn't need to be indented so much; a generic
   sketch of this pattern follows below.
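
For illustration only (not part of the patch): a minimal, generic C
sketch of the early-'continue' rearrangement described in 2/.  The
names disk_wanted(), handle_disk() and ndisks are hypothetical, not
raid1.c code.

/* Hypothetical helpers, declared only so the sketch is self-contained. */
static int disk_wanted(int d);
static void handle_disk(int d);

static void loop_before(int ndisks)
{
	int d;

	for (d = 0; d < ndisks; d++)
		if (disk_wanted(d)) {
			/* the whole body is forced one indent level deeper */
			handle_disk(d);
		}
}

static void loop_after(int ndisks)
{
	int d;

	for (d = 0; d < ndisks; d++) {
		if (!disk_wanted(d))
			continue;	/* skip uninteresting entries early */
		handle_disk(d);		/* body stays at the outer indent level */
	}
}

The behaviour is identical; only the indentation of the loop body
changes, which is what makes the rearrangement purely cosmetic.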

Signed-off-by: NeilBrown <neilb@suse.de>
---

 drivers/md/raid1.c |  184 +++++++++++++++++++++++++++-------------------------
 1 files changed, 95 insertions(+), 89 deletions(-)

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7fd7a4d..2b9e86c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1203,6 +1203,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
 		int d = r1_bio->read_disk;
 		int success = 0;
 		mdk_rdev_t *rdev;
+		int start;
 
 		if (s > (PAGE_SIZE>>9))
 			s = PAGE_SIZE >> 9;
@@ -1227,41 +1228,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
 				d = 0;
 		} while (!success && d != r1_bio->read_disk);
 
-		if (success) {
-			int start = d;
-			/* write it back and re-read */
-			set_bit(R1BIO_Uptodate, &r1_bio->state);
-			while (d != r1_bio->read_disk) {
-				if (d == 0)
-					d = conf->raid_disks;
-				d--;
-				if (r1_bio->bios[d]->bi_end_io != end_sync_read)
-					continue;
-				rdev = conf->mirrors[d].rdev;
-				atomic_add(s, &rdev->corrected_errors);
-				if (sync_page_io(rdev,
-						 sect,
-						 s<<9,
-						 bio->bi_io_vec[idx].bv_page,
-						 WRITE, false) == 0)
-					md_error(mddev, rdev);
-			}
-			d = start;
-			while (d != r1_bio->read_disk) {
-				if (d == 0)
-					d = conf->raid_disks;
-				d--;
-				if (r1_bio->bios[d]->bi_end_io != end_sync_read)
-					continue;
-				rdev = conf->mirrors[d].rdev;
-				if (sync_page_io(rdev,
-						 sect,
-						 s<<9,
-						 bio->bi_io_vec[idx].bv_page,
-						 READ, false) == 0)
-					md_error(mddev, rdev);
-			}
-		} else {
+		if (!success) {
 			char b[BDEVNAME_SIZE];
 			/* Cannot read from anywhere, array is toast */
 			md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
@@ -1274,10 +1241,47 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
 			put_buf(r1_bio);
 			return 0;
 		}
+
+		start = d;
+		/* write it back and re-read */
+		while (d != r1_bio->read_disk) {
+			if (d == 0)
+				d = conf->raid_disks;
+			d--;
+			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
+				continue;
+			rdev = conf->mirrors[d].rdev;
+			if (sync_page_io(rdev,
+					 sect,
+					 s<<9,
+					 bio->bi_io_vec[idx].bv_page,
+					 WRITE, false) == 0) {
+				r1_bio->bios[d]->bi_end_io = NULL;
+				rdev_dec_pending(rdev, mddev);
+				md_error(mddev, rdev);
+			} else
+				atomic_add(s, &rdev->corrected_errors);
+		}
+		d = start;
+		while (d != r1_bio->read_disk) {
+			if (d == 0)
+				d = conf->raid_disks;
+			d--;
+			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
+				continue;
+			rdev = conf->mirrors[d].rdev;
+			if (sync_page_io(rdev,
+					 sect,
+					 s<<9,
+					 bio->bi_io_vec[idx].bv_page,
+					 READ, false) == 0)
+				md_error(mddev, rdev);
+		}
 		sectors -= s;
 		sect += s;
 		idx ++;
 	}
+	set_bit(R1BIO_Uptodate, &r1_bio->state);
 	return 1;
 }
 
@@ -1296,7 +1300,7 @@ static int process_checks(r1bio_t *r1_bio)
 	int i;
 
 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
-		for (i=0; i<mddev->raid_disks; i++)
+		for (i=0; i < conf->raid_disks; i++)
 			if (r1_bio->bios[i]->bi_end_io == end_sync_read)
 				md_error(mddev, conf->mirrors[i].rdev);
 
@@ -1304,7 +1308,7 @@ static int process_checks(r1bio_t *r1_bio)
 		put_buf(r1_bio);
 		return -1;
 	}
-	for (primary=0; primary<mddev->raid_disks; primary++)
+	for (primary = 0; primary < conf->raid_disks; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
 		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
 			r1_bio->bios[primary]->bi_end_io = NULL;
@@ -1312,61 +1316,63 @@ static int process_checks(r1bio_t *r1_bio)
 			break;
 		}
 	r1_bio->read_disk = primary;
-	for (i=0; i<mddev->raid_disks; i++)
-		if (r1_bio->bios[i]->bi_end_io == end_sync_read) {
-			int j;
-			int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
-			struct bio *pbio = r1_bio->bios[primary];
-			struct bio *sbio = r1_bio->bios[i];
-
-			if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
-				for (j = vcnt; j-- ; ) {
-					struct page *p, *s;
-					p = pbio->bi_io_vec[j].bv_page;
-					s = sbio->bi_io_vec[j].bv_page;
-					if (memcmp(page_address(p),
-						   page_address(s),
-						   PAGE_SIZE))
-						break;
-				}
-			} else
-				j = 0;
-			if (j >= 0)
-				mddev->resync_mismatches += r1_bio->sectors;
-			if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-				      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
-				sbio->bi_end_io = NULL;
-				rdev_dec_pending(conf->mirrors[i].rdev, mddev);
-			} else {
-				/* fixup the bio for reuse */
-				int size;
-				sbio->bi_vcnt = vcnt;
-				sbio->bi_size = r1_bio->sectors << 9;
-				sbio->bi_idx = 0;
-				sbio->bi_phys_segments = 0;
-				sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
-				sbio->bi_flags |= 1 << BIO_UPTODATE;
-				sbio->bi_next = NULL;
-				sbio->bi_sector = r1_bio->sector +
-					conf->mirrors[i].rdev->data_offset;
-				sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
-				size = sbio->bi_size;
-				for (j = 0; j < vcnt ; j++) {
-					struct bio_vec *bi;
-					bi = &sbio->bi_io_vec[j];
-					bi->bv_offset = 0;
-					if (size > PAGE_SIZE)
-						bi->bv_len = PAGE_SIZE;
-					else
-						bi->bv_len = size;
-					size -= PAGE_SIZE;
-					memcpy(page_address(bi->bv_page),
-					       page_address(pbio->bi_io_vec[j].bv_page),
-					       PAGE_SIZE);
-				}
+	for (i = 0; i < conf->raid_disks; i++) {
+		int j;
+		int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
+		struct bio *pbio = r1_bio->bios[primary];
+		struct bio *sbio = r1_bio->bios[i];
+		int size;
+
+		if (r1_bio->bios[i]->bi_end_io != end_sync_read)
+			continue;
 
+		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+			for (j = vcnt; j-- ; ) {
+				struct page *p, *s;
+				p = pbio->bi_io_vec[j].bv_page;
+				s = sbio->bi_io_vec[j].bv_page;
+				if (memcmp(page_address(p),
+					   page_address(s),
+					   PAGE_SIZE))
+					break;
 			}
+		} else
+			j = 0;
+		if (j >= 0)
+			mddev->resync_mismatches += r1_bio->sectors;
+		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
+			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+			/* No need to write to this device. */
+			sbio->bi_end_io = NULL;
+			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
+			continue;
 		}
+		/* fixup the bio for reuse */
+		sbio->bi_vcnt = vcnt;
+		sbio->bi_size = r1_bio->sectors << 9;
+		sbio->bi_idx = 0;
+		sbio->bi_phys_segments = 0;
+		sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
+		sbio->bi_flags |= 1 << BIO_UPTODATE;
+		sbio->bi_next = NULL;
+		sbio->bi_sector = r1_bio->sector +
+			conf->mirrors[i].rdev->data_offset;
+		sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+		size = sbio->bi_size;
+		for (j = 0; j < vcnt ; j++) {
+			struct bio_vec *bi;
+			bi = &sbio->bi_io_vec[j];
+			bi->bv_offset = 0;
+			if (size > PAGE_SIZE)
+				bi->bv_len = PAGE_SIZE;
+			else
+				bi->bv_len = size;
+			size -= PAGE_SIZE;
+			memcpy(page_address(bi->bv_page),
+			       page_address(pbio->bi_io_vec[j].bv_page),
+			       PAGE_SIZE);
+		}
+	}
 	return 0;
 }
 



Thread overview: 17+ messages
2011-05-11  6:30 [md PATCH 00/16] md patches for 2.6.40 NeilBrown
2011-05-11  6:30 ` [md PATCH 02/16] md: reject a re-add request that cannot be honoured NeilBrown
2011-05-11  6:30 ` [md PATCH 01/16] md: Fix race when creating a new md device NeilBrown
2011-05-11  6:30 ` [md PATCH 04/16] md: simplify raid10 read_balance NeilBrown
2011-05-11  6:30 ` [md PATCH 03/16] md/bitmap: fix saving of events_cleared and other state NeilBrown
2011-05-11  6:30 ` [md PATCH 13/16] md/raid10: make more use of 'slot' in raid10d NeilBrown
2011-05-11  6:30 ` [md PATCH 14/16] md/raid10: remove unused variable NeilBrown
2011-05-11  6:30 ` [md PATCH 10/16] md/raid1: try fix_sync_read_error before process_checks NeilBrown
2011-05-11  6:30 ` [md PATCH 08/16] md/raid1: split out two sub-functions from sync_request_write NeilBrown
2011-05-11  6:30 ` [md PATCH 05/16] md/raid1: clean up read_balance NeilBrown
2011-05-11  6:30 ` NeilBrown [this message]
2011-05-11  6:30 ` [md PATCH 06/16] md/multipath: discard ->working_disks in favour of ->degraded NeilBrown
2011-05-11  6:30 ` [md PATCH 07/16] md: make error_handler functions more uniform and correct NeilBrown
2011-05-11  6:30 ` [md PATCH 11/16] md/raid1: improve handling of pages allocated for write-behind NeilBrown
2011-05-11  6:30 ` [md PATCH 15/16] md/raid10: reformat some loops with less indenting NeilBrown
2011-05-11  6:30 ` [md PATCH 16/16] md: allow resync_start to be set while an array is active NeilBrown
2011-05-11  6:30 ` [md PATCH 12/16] md/raid10: some tidying up in fix_read_error NeilBrown
