From: Christoph Hellwig <hch@lst.de>
To: Josef Bacik <josef@toxicpanda.com>,
	David Sterba <dsterba@suse.com>, Qu Wenruo <wqu@suse.com>
Cc: Naohiro Aota <naohiro.aota@wdc.com>,
	linux-btrfs@vger.kernel.org, linux-fsdevel@vger.kernel.org
Subject: [PATCH 19/40] btrfs: defer I/O completion based on the btrfs_raid_bio
Date: Tue, 22 Mar 2022 16:55:45 +0100
Message-ID: <20220322155606.1267165-20-hch@lst.de>
In-Reply-To: <20220322155606.1267165-1-hch@lst.de>

Instead of attaching an extra allocation and an indirect call to each
low-level bio issued by the RAID code, add a btrfs_work to struct
btrfs_raid_bio and only defer the per-rbio completion action.  The
per-bio action for all the I/Os is trivial and can be safely done
from interrupt context.

As a nice side effect this also allows sharing the boilerplate code
for the per-bio completion.
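
For illustration only: below is a minimal stand-alone userspace sketch
of the completion pattern this patch switches to, with pthreads
standing in for btrfs_queue_work() and every name in it hypothetical
rather than kernel API.  Each per-bio end_io does nothing but error
accounting and a counter decrement, which is cheap enough for
interrupt context; only the final completion defers the heavyweight
per-rbio step.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct rbio {
	atomic_int stripes_pending;	/* like rbio->stripes_pending */
	atomic_int error;		/* like rbio->error */
};

/* deferred per-rbio completion, like raid56_rmw_end_io_work() */
static void *rbio_end_io_work(void *arg)
{
	struct rbio *rbio = arg;

	printf("all stripes done, errors=%d\n", atomic_load(&rbio->error));
	return NULL;
}

/* shared per-bio completion, like raid56_bio_end_io() */
static void bio_end_io(struct rbio *rbio, int status)
{
	if (status)
		atomic_fetch_add(&rbio->error, 1);
	/* only the completion that drops the count to zero defers work */
	if (atomic_fetch_sub(&rbio->stripes_pending, 1) == 1) {
		pthread_t worker;	/* stand-in for the endio workqueue */

		pthread_create(&worker, NULL, rbio_end_io_work, rbio);
		pthread_join(worker, NULL);
	}
}

int main(void)
{
	struct rbio rbio = { 3, 0 };	/* three stripe bios in flight */

	bio_end_io(&rbio, 0);
	bio_end_io(&rbio, 1);		/* one failed stripe */
	bio_end_io(&rbio, 0);		/* last completion defers the work */
	return 0;
}

The kernel version needs no allocation in the I/O path at all: the
work item lives in the rbio itself, which is what lets the per-bio
btrfs_end_io_wq indirection below go away.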
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/btrfs/disk-io.c |   6 +--
 fs/btrfs/disk-io.h |   1 -
 fs/btrfs/raid56.c  | 110 +++++++++++++++++++--------------------------
 3 files changed, 46 insertions(+), 71 deletions(-)

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9b8ee74144910..dc497e17dcd06 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -740,14 +740,10 @@ static void end_workqueue_bio(struct bio *bio)
 			wq = fs_info->endio_meta_write_workers;
 		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
 			wq = fs_info->endio_freespace_worker;
-		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-			wq = fs_info->endio_raid56_workers;
 		else
 			wq = fs_info->endio_write_workers;
 	} else {
-		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-			wq = fs_info->endio_raid56_workers;
-		else if (end_io_wq->metadata)
+		if (end_io_wq->metadata)
 			wq = fs_info->endio_meta_workers;
 		else
 			wq = fs_info->endio_workers;
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 5e8bef4b7563a..2364a30cd9e32 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -21,7 +21,6 @@ enum btrfs_wq_endio_type {
 	BTRFS_WQ_ENDIO_DATA,
 	BTRFS_WQ_ENDIO_METADATA,
 	BTRFS_WQ_ENDIO_FREE_SPACE,
-	BTRFS_WQ_ENDIO_RAID56,
 };
 
 static inline u64 btrfs_sb_offset(int mirror)
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0c96e91e9ee03..69e45e14a0b39 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -145,6 +145,9 @@ struct btrfs_raid_bio {
 	atomic_t stripes_pending;
 
 	atomic_t error;
+
+	struct btrfs_work end_io_work;
+
 	/*
 	 * these are two arrays of pointers. We allocate the
 	 * rbio big enough to hold them both and setup their
@@ -1428,15 +1431,7 @@ static void set_bio_pages_uptodate(struct bio *bio)
 		SetPageUptodate(bvec->bv_page);
 }
 
-/*
- * end io for the read phase of the rmw cycle. All the bios here are physical
- * stripe bios we've read from the disk so we can recalculate the parity of the
- * stripe.
- *
- * This will usually kick off finish_rmw once all the bios are read in, but it
- * may trigger parity reconstruction if we had any errors along the way
- */
-static void raid_rmw_end_io(struct bio *bio)
+static void raid56_bio_end_io(struct bio *bio)
 {
 	struct btrfs_raid_bio *rbio = bio->bi_private;
 
@@ -1449,21 +1444,33 @@ static void raid_rmw_end_io(struct bio *bio)
 	if (!atomic_dec_and_test(&rbio->stripes_pending))
 		return;
 
+	btrfs_queue_work(rbio->bioc->fs_info->endio_raid56_workers,
+			 &rbio->end_io_work);
+}
 
-	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
-		goto cleanup;
+/*
+ * End io handler for the read phase of the rmw cycle. All the bios here are
+ * physical stripe bios we've read from the disk so we can recalculate the
+ * parity of the stripe.
+ *
+ * This will usually kick off finish_rmw once all the bios are read in, but it
+ * may trigger parity reconstruction if we had any errors along the way
+ */
+static void raid56_rmw_end_io_work(struct btrfs_work *work)
+{
+	struct btrfs_raid_bio *rbio =
+		container_of(work, struct btrfs_raid_bio, end_io_work);
+
+	if (atomic_read(&rbio->error) > rbio->bioc->max_errors) {
+		rbio_orig_end_io(rbio, BLK_STS_IOERR);
+		return;
+	}
 
 	/*
-	 * this will normally call finish_rmw to start our write
-	 * but if there are any failed stripes we'll reconstruct
-	 * from parity first
+	 * This will normally call finish_rmw to start our write but if there
+	 * are any failed stripes we'll reconstruct from parity first.
 	 */
 	validate_rbio_for_rmw(rbio);
-	return;
-
-cleanup:
-
-	rbio_orig_end_io(rbio, BLK_STS_IOERR);
 }
 
 /*
@@ -1537,11 +1544,9 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
 	 * touch it after that.
 	 */
 	atomic_set(&rbio->stripes_pending, bios_to_read);
+	btrfs_init_work(&rbio->end_io_work, raid56_rmw_end_io_work, NULL, NULL);
 	while ((bio = bio_list_pop(&bio_list))) {
-		bio->bi_end_io = raid_rmw_end_io;
-
-		btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
-
+		bio->bi_end_io = raid56_bio_end_io;
 		submit_bio(bio);
 	}
 	/* the actual write will happen once the reads are done */
@@ -1980,25 +1985,13 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 }
 
 /*
- * This is called only for stripes we've read from disk to
- * reconstruct the parity.
+ * This is called only for stripes we've read from disk to reconstruct the
+ * parity.
  */
-static void raid_recover_end_io(struct bio *bio)
+static void raid_recover_end_io_work(struct btrfs_work *work)
 {
-	struct btrfs_raid_bio *rbio = bio->bi_private;
-
-	/*
-	 * we only read stripe pages off the disk, set them
-	 * up to date if there were no errors
-	 */
-	if (bio->bi_status)
-		fail_bio_stripe(rbio, bio);
-	else
-		set_bio_pages_uptodate(bio);
-	bio_put(bio);
-
-	if (!atomic_dec_and_test(&rbio->stripes_pending))
-		return;
+	struct btrfs_raid_bio *rbio =
+		container_of(work, struct btrfs_raid_bio, end_io_work);
 
 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
@@ -2082,11 +2075,10 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
 	 * touch it after that.
 	 */
 	atomic_set(&rbio->stripes_pending, bios_to_read);
+	btrfs_init_work(&rbio->end_io_work, raid_recover_end_io_work,
+			NULL, NULL);
 	while ((bio = bio_list_pop(&bio_list))) {
-		bio->bi_end_io = raid_recover_end_io;
-
-		btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
-
+		bio->bi_end_io = raid56_bio_end_io;
 		submit_bio(bio);
 	}
 
@@ -2445,8 +2437,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 
 	atomic_set(&rbio->stripes_pending, nr_data);
 	while ((bio = bio_list_pop(&bio_list))) {
-		bio->bi_end_io = raid_write_end_io;
-
+		bio->bi_end_io = raid56_bio_end_io;
 		submit_bio(bio);
 	}
 	return;
@@ -2534,24 +2525,14 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
  * This will usually kick off finish_rmw once all the bios are read in, but it
  * may trigger parity reconstruction if we had any errors along the way
  */
-static void raid56_parity_scrub_end_io(struct bio *bio)
+static void raid56_parity_scrub_end_io_work(struct btrfs_work *work)
 {
-	struct btrfs_raid_bio *rbio = bio->bi_private;
-
-	if (bio->bi_status)
-		fail_bio_stripe(rbio, bio);
-	else
-		set_bio_pages_uptodate(bio);
-
-	bio_put(bio);
-
-	if (!atomic_dec_and_test(&rbio->stripes_pending))
-		return;
+	struct btrfs_raid_bio *rbio =
+		container_of(work, struct btrfs_raid_bio, end_io_work);
 
 	/*
-	 * this will normally call finish_rmw to start our write
-	 * but if there are any failed stripes we'll reconstruct
-	 * from parity first
+	 * This will normally call finish_rmw to start our write, but if there
+	 * are any failed stripes we'll reconstruct from parity first
 	 */
 	validate_rbio_for_parity_scrub(rbio);
 }
@@ -2621,11 +2602,10 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
 	 * touch it after that.
 	 */
 	atomic_set(&rbio->stripes_pending, bios_to_read);
+	btrfs_init_work(&rbio->end_io_work, raid56_parity_scrub_end_io_work,
+			NULL, NULL);
 	while ((bio = bio_list_pop(&bio_list))) {
-		bio->bi_end_io = raid56_parity_scrub_end_io;
-
-		btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
-
+		bio->bi_end_io = raid56_bio_end_io;
 		submit_bio(bio);
 	}
 	/* the actual write will happen once the reads are done */
--
2.30.2