From: Christoph Hellwig <hch@lst.de>
To: Josef Bacik <josef@toxicpanda.com>,
	David Sterba <dsterba@suse.com>, Qu Wenruo <wqu@suse.com>
Cc: Naohiro Aota <naohiro.aota@wdc.com>,
	linux-btrfs@vger.kernel.org, linux-fsdevel@vger.kernel.org
Subject: [PATCH 11/12] btrfs: don't allocate a btrfs_bio for scrub bios
Date: Mon,  4 Apr 2022 06:45:27 +0200
Message-ID: <20220404044528.71167-12-hch@lst.de>
In-Reply-To: <20220404044528.71167-1-hch@lst.de>

All the scrub bios go straight to the block device or the raid56 code,
neither of which looks at the btrfs_bio.  Allocate plain bios with
bio_alloc() instead of btrfs_bio_alloc(), which also sets the target
block device and the operation at allocation time, and drop the now
superfluous bio_set_dev() calls and bi_opf assignments.
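
For reference, the old and new allocation patterns look roughly like
this (a minimal sketch of the generic block layer API, not the exact
scrub code; "bdev" stands for whichever device the bio targets):

	/* Old pattern: btrfs-private allocation, with the device and
	 * operation set in separate calls afterwards.
	 */
	bio = btrfs_bio_alloc(BIO_MAX_VECS);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;

	/* New pattern: bio_alloc() takes the block_device, the vector
	 * count, the operation and the gfp mask in one call, and with
	 * a gfp mask that may sleep it cannot fail for vector counts
	 * up to BIO_MAX_VECS.
	 */
	bio = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);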

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/scrub.c | 47 ++++++++++++++++++-----------------------------
 1 file changed, 18 insertions(+), 29 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 93bb480de2164..38ad158a93ba2 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1415,8 +1415,8 @@ static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
 	if (!first_page->dev->bdev)
 		goto out;
 
-	bio = btrfs_bio_alloc(BIO_MAX_VECS);
-	bio_set_dev(bio, first_page->dev->bdev);
+	bio = bio_alloc(first_page->dev->bdev, BIO_MAX_VECS, REQ_OP_READ,
+			GFP_NOFS);
 
 	for (page_num = 0; page_num < sblock->page_count; page_num++) {
 		struct scrub_page *spage = sblock->pagev[page_num];
@@ -1649,8 +1649,6 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 	}
 	sbio = sctx->wr_curr_bio;
 	if (sbio->page_count == 0) {
-		struct bio *bio;
-
 		ret = fill_writer_pointer_gap(sctx,
 					      spage->physical_for_dev_replace);
 		if (ret) {
@@ -1661,17 +1659,14 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 		sbio->physical = spage->physical_for_dev_replace;
 		sbio->logical = spage->logical;
 		sbio->dev = sctx->wr_tgtdev;
-		bio = sbio->bio;
-		if (!bio) {
-			bio = btrfs_bio_alloc(sctx->pages_per_bio);
-			sbio->bio = bio;
+		if (!sbio->bio) {
+			sbio->bio = bio_alloc(sbio->dev->bdev,
+					      sctx->pages_per_bio,
+					      REQ_OP_WRITE, GFP_NOFS);
 		}
-
-		bio->bi_private = sbio;
-		bio->bi_end_io = scrub_wr_bio_end_io;
-		bio_set_dev(bio, sbio->dev->bdev);
-		bio->bi_iter.bi_sector = sbio->physical >> 9;
-		bio->bi_opf = REQ_OP_WRITE;
+		sbio->bio->bi_private = sbio;
+		sbio->bio->bi_end_io = scrub_wr_bio_end_io;
+		sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
 		sbio->status = 0;
 	} else if (sbio->physical + sbio->page_count * sectorsize !=
 		   spage->physical_for_dev_replace ||
@@ -1712,7 +1707,6 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
 
 	sbio = sctx->wr_curr_bio;
 	sctx->wr_curr_bio = NULL;
-	WARN_ON(!sbio->bio->bi_bdev);
 	scrub_pending_bio_inc(sctx);
 	/* process all writes in a single worker thread. Then the block layer
 	 * orders the requests before sending them to the driver which
@@ -2084,22 +2078,17 @@ static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
 	}
 	sbio = sctx->bios[sctx->curr];
 	if (sbio->page_count == 0) {
-		struct bio *bio;
-
 		sbio->physical = spage->physical;
 		sbio->logical = spage->logical;
 		sbio->dev = spage->dev;
-		bio = sbio->bio;
-		if (!bio) {
-			bio = btrfs_bio_alloc(sctx->pages_per_bio);
-			sbio->bio = bio;
+		if (!sbio->bio) {
+			sbio->bio = bio_alloc(sbio->dev->bdev,
+					      sctx->pages_per_bio,
+					      REQ_OP_READ, GFP_NOFS);
 		}
-
-		bio->bi_private = sbio;
-		bio->bi_end_io = scrub_bio_end_io;
-		bio_set_dev(bio, sbio->dev->bdev);
-		bio->bi_iter.bi_sector = sbio->physical >> 9;
-		bio->bi_opf = REQ_OP_READ;
+		sbio->bio->bi_private = sbio;
+		sbio->bio->bi_end_io = scrub_bio_end_io;
+		sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
 		sbio->status = 0;
 	} else if (sbio->physical + sbio->page_count * sectorsize !=
 		   spage->physical ||
@@ -2215,7 +2204,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
 		goto bioc_out;
 	}
 
-	bio = btrfs_bio_alloc(BIO_MAX_VECS);
+	bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
 	bio->bi_iter.bi_sector = logical >> 9;
 	bio->bi_private = sblock;
 	bio->bi_end_io = scrub_missing_raid56_end_io;
@@ -2831,7 +2820,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 	if (ret || !bioc || !bioc->raid_map)
 		goto bioc_out;
 
-	bio = btrfs_bio_alloc(BIO_MAX_VECS);
+	bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
 	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
 	bio->bi_private = sparity;
 	bio->bi_end_io = scrub_parity_bio_endio;
-- 
2.30.2
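
A note on the two raid56 hunks above: there bio_alloc() is passed a
NULL block_device, because those bios are handed to the raid56 code,
which builds its own per-stripe bios for the real devices, so there is
no single device to attach at allocation time.  A minimal sketch of
that pattern (simplified from the hunks above):

	/* No bdev yet: the raid56 code maps this bio onto per-stripe
	 * bios for the real devices, so only the logical sector is
	 * set here.
	 */
	bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
	bio->bi_iter.bi_sector = logical >> 9;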


Thread overview: 24+ messages
2022-04-04  4:45 cleanup btrfs bio handling, part 1 Christoph Hellwig
2022-04-04  4:45 ` [PATCH 01/12] btrfs: refactor __btrfsic_submit_bio Christoph Hellwig
2022-04-04  4:45 ` [PATCH 02/12] btrfs: split submit_bio from btrfsic checking Christoph Hellwig
2022-04-05 10:40   ` Nikolay Borisov
2022-04-04  4:45 ` [PATCH 03/12] btrfs: simplify btrfsic_read_block Christoph Hellwig
2022-04-04  4:45 ` [PATCH 04/12] btrfs: simplify repair_io_failure Christoph Hellwig
2022-04-04  4:45 ` [PATCH 05/12] btrfs: simplify scrub_recheck_block Christoph Hellwig
2022-04-05 13:49   ` Nikolay Borisov
2022-04-05 14:05     ` Christoph Hellwig
2022-04-04  4:45 ` [PATCH 06/12] btrfs: simplify scrub_repair_page_from_good_copy Christoph Hellwig
2022-04-04  4:45 ` [PATCH 07/12] btrfs: move the call to bio_set_dev out of submit_stripe_bio Christoph Hellwig
2022-04-04  4:45 ` [PATCH 08/12] btrfs: pass a block_device to btrfs_bio_clone Christoph Hellwig
2022-04-04  7:05   ` Johannes Thumshirn
2022-04-04  7:07     ` Christoph Hellwig
2022-04-04  4:45 ` [PATCH 09/12] btrfs: initialize ->bi_opf and ->bi_private in rbio_add_io_page Christoph Hellwig
2022-04-04  4:45 ` [PATCH 10/12] btrfs: don't allocate a btrfs_bio for raid56 per-stripe bios Christoph Hellwig
2022-04-04  4:45 ` [PATCH 11/12] btrfs: don't allocate a btrfs_bio for scrub bios Christoph Hellwig [this message]
2022-04-04  4:45 ` [PATCH 12/12] btrfs: stop using the btrfs_bio saved iter in index_rbio_pages Christoph Hellwig
2022-04-05 14:56 ` cleanup btrfs bio handling, part 1 David Sterba
2022-04-05 15:09   ` Christoph Hellwig
2022-04-06 18:00     ` David Sterba
2022-04-07  5:53       ` Christoph Hellwig
2022-04-07  6:48         ` Johannes Thumshirn
2022-04-07  7:41           ` Christoph Hellwig
