From: Christoph Hellwig <hch@lst.de>
To: Josef Bacik <josef@toxicpanda.com>,
David Sterba <dsterba@suse.com>, Qu Wenruo <wqu@suse.com>
Cc: Naohiro Aota <naohiro.aota@wdc.com>,
linux-btrfs@vger.kernel.org, linux-fsdevel@vger.kernel.org
Subject: [PATCH 25/40] btrfs: remove btrfs_wq_submit_bio
Date: Tue, 22 Mar 2022 16:55:51 +0100
Message-ID: <20220322155606.1267165-26-hch@lst.de>
In-Reply-To: <20220322155606.1267165-1-hch@lst.de>

Reuse the btrfs_work in struct btrfs_bio for asynchronous submission
and remove the extra allocation for async write bios.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
fs/btrfs/disk-io.c | 122 +++++++++++++--------------------------------
fs/btrfs/disk-io.h | 8 +--
fs/btrfs/inode.c | 42 +++++++++-------
3 files changed, 62 insertions(+), 110 deletions(-)
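
For reference, below is a minimal userspace sketch of the embedded-work plus
container_of() pattern the new submission helpers rely on, i.e. recovering the
containing btrfs_bio from the btrfs_work passed to the work callbacks.  All
names in the sketch (fake_work, fake_bio, fake_submit_work) are hypothetical
stand-ins, not the kernel API:

/*
 * Minimal userspace sketch of the embedded-work + container_of() pattern.
 * Everything here is a hypothetical stand-in, not kernel code.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_work {
	void (*func)(struct fake_work *work);
};

/*
 * Stand-in for struct btrfs_bio: the work item is embedded in the bio
 * container itself, so async submission needs no extra allocation.
 */
struct fake_bio {
	int mirror_num;
	struct fake_work work;
};

static void fake_submit_work(struct fake_work *work)
{
	/* Recover the containing bio from the embedded work item. */
	struct fake_bio *fbio = container_of(work, struct fake_bio, work);

	printf("submitting bio, mirror_num=%d\n", fbio->mirror_num);
}

int main(void)
{
	struct fake_bio fbio = { .mirror_num = 1 };

	fbio.work.func = fake_submit_work;
	/* A real workqueue would invoke this from a worker thread. */
	fbio.work.func(&fbio.work);
	return 0;
}

Embedding the work item in the bio container this way is what lets
btrfs_submit_bio_async() skip the kmalloc() that btrfs_wq_submit_bio() needed
for struct async_submit_bio.
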
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index bb910b78bbc82..59c1dc0b37399 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -69,23 +69,6 @@ static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
crypto_free_shash(fs_info->csum_shash);
}
-/*
- * async submit bios are used to offload expensive checksumming
- * onto the worker threads. They checksum file and metadata bios
- * just before they are sent down the IO stack.
- */
-struct async_submit_bio {
- struct inode *inode;
- struct bio *bio;
- extent_submit_bio_start_t *submit_bio_start;
- int mirror_num;
-
- /* Optional parameter for submit_bio_start used by direct io */
- u64 dio_file_offset;
- struct btrfs_work work;
- blk_status_t status;
-};
-
/*
* Lockdep class keys for extent_buffer->lock's in this root. For a given
* eb, the lockdep key is determined by the btrfs_root it belongs to and
@@ -691,18 +674,6 @@ int btrfs_validate_metadata_buffer(struct btrfs_bio *bbio,
return ret;
}
-static void run_one_async_start(struct btrfs_work *work)
-{
- struct async_submit_bio *async;
- blk_status_t ret;
-
- async = container_of(work, struct async_submit_bio, work);
- ret = async->submit_bio_start(async->inode, async->bio,
- async->dio_file_offset);
- if (ret)
- async->status = ret;
-}
-
/*
* In order to insert checksums into the metadata in large chunks, we wait
* until bio submission time. All the pages in the bio are checksummed and
@@ -711,72 +682,51 @@ static void run_one_async_start(struct btrfs_work *work)
* At IO completion time the csums attached on the ordered extent record are
* inserted into the tree.
*/
-static void run_one_async_done(struct btrfs_work *work)
+static void btrfs_submit_bio_work(struct btrfs_work *work)
{
- struct async_submit_bio *async;
- struct inode *inode;
+ struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, work);
+ struct btrfs_fs_info *fs_info = btrfs_sb(bbio->inode->i_sb);
+ struct bio *bio = &bbio->bio;
blk_status_t ret;
- async = container_of(work, struct async_submit_bio, work);
- inode = async->inode;
+ /* Ensure the bio doesn't go away while linked into the workqueue */
+ bio_get(bio);
/* If an error occurred we just want to clean up the bio and move on */
- if (async->status) {
- async->bio->bi_status = async->status;
- bio_endio(async->bio);
+ if (bio->bi_status) {
+ bio_endio(bio);
return;
}
/*
- * All of the bios that pass through here are from async helpers.
- * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
- * This changes nothing when cgroups aren't in use.
+ * Use REQ_CGROUP_PUNT to issue the bio from the owning cgroup's
+ * context. This changes nothing when cgroups aren't in use.
*/
- async->bio->bi_opf |= REQ_CGROUP_PUNT;
- ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
+ bio->bi_opf |= REQ_CGROUP_PUNT;
+ ret = btrfs_map_bio(fs_info, bio, bbio->mirror_num);
if (ret) {
- async->bio->bi_status = ret;
- bio_endio(async->bio);
+ bio->bi_status = ret;
+ bio_endio(bio);
}
}
-static void run_one_async_free(struct btrfs_work *work)
+static void btrfs_submit_bio_done(struct btrfs_work *work)
{
- struct async_submit_bio *async;
+ struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, work);
- async = container_of(work, struct async_submit_bio, work);
- kfree(async);
+ bio_put(&bbio->bio);
}
-blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
- int mirror_num, unsigned long bio_flags,
- u64 dio_file_offset,
- extent_submit_bio_start_t *submit_bio_start)
+void btrfs_submit_bio_async(struct btrfs_bio *bbio,
+ void (*start)(struct btrfs_work *work))
{
- struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
- struct async_submit_bio *async;
+ ASSERT(bbio->end_io_type == BTRFS_ENDIO_NONE);
- async = kmalloc(sizeof(*async), GFP_NOFS);
- if (!async)
- return BLK_STS_RESOURCE;
-
- async->inode = inode;
- async->bio = bio;
- async->mirror_num = mirror_num;
- async->submit_bio_start = submit_bio_start;
-
- btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
- run_one_async_free);
-
- async->dio_file_offset = dio_file_offset;
-
- async->status = 0;
-
- if (op_is_sync(bio->bi_opf))
- btrfs_set_work_high_priority(&async->work);
-
- btrfs_queue_work(fs_info->workers, &async->work);
- return 0;
+ btrfs_init_work(&bbio->work, start, btrfs_submit_bio_work,
+ btrfs_submit_bio_done);
+ if (op_is_sync(bbio->bio.bi_opf))
+ btrfs_set_work_high_priority(&bbio->work);
+ btrfs_queue_work(btrfs_sb(bbio->inode->i_sb)->workers, &bbio->work);
}
static blk_status_t btree_csum_one_bio(struct bio *bio)
@@ -797,14 +747,11 @@ static blk_status_t btree_csum_one_bio(struct bio *bio)
return errno_to_blk_status(ret);
}
-static blk_status_t btree_submit_bio_start(struct inode *inode, struct bio *bio,
- u64 dio_file_offset)
+static void btree_submit_bio_start(struct btrfs_work *work)
{
- /*
- * when we're called for a write, we're already in the async
- * submission context. Just jump into btrfs_map_bio
- */
- return btree_csum_one_bio(bio);
+ struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, work);
+
+ bbio->bio.bi_status = btree_csum_one_bio(&bbio->bio);
}
/*
@@ -827,18 +774,21 @@ blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
int mirror_num)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_bio *bbio = btrfs_bio(bio);
blk_status_t ret;
if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
- if (should_async_write(fs_info, BTRFS_I(inode)))
- return btrfs_wq_submit_bio(inode, bio, mirror_num, 0, 0,
- btree_submit_bio_start);
+ if (should_async_write(fs_info, BTRFS_I(inode))) {
+ bbio->mirror_num = mirror_num;
+ btrfs_submit_bio_async(bbio, btree_submit_bio_start);
+ return BLK_STS_OK;
+ }
ret = btree_csum_one_bio(bio);
if (ret)
return ret;
} else {
/* checksum validation should happen in async threads: */
- btrfs_bio(bio)->end_io_type = BTRFS_ENDIO_WQ_METADATA_READ;
+ bbio->end_io_type = BTRFS_ENDIO_WQ_METADATA_READ;
}
return btrfs_map_bio(fs_info, bio, mirror_num);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index e8900c1b71664..25fe657ebbac1 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -113,12 +113,8 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
int atomic);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
struct btrfs_key *first_key);
-blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
- int mirror_num, unsigned long bio_flags,
- u64 dio_file_offset,
- extent_submit_bio_start_t *submit_bio_start);
-blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
- int mirror_num);
+void btrfs_submit_bio_async(struct btrfs_bio *bbio,
+ void (*start)(struct btrfs_work *work));
int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5a5474fac0b28..70d82effe5e37 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2300,17 +2300,19 @@ void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
}
/*
- * in order to insert checksums into the metadata in large chunks,
- * we wait until bio submission time. All the pages in the bio are
- * checksummed and sums are attached onto the ordered extent record.
+ * In order to insert checksums into the metadata in large chunks, we wait until
+ * bio submission time. All the pages in the bio are checksummed and sums are
+ * attached onto the ordered extent record.
*
- * At IO completion time the cums attached on the ordered extent record
- * are inserted into the btree
+ * At I/O completion time the cums attached on the ordered extent record are
+ * inserted into the btree.
*/
-static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
- u64 dio_file_offset)
+static void btrfs_submit_bio_start(struct btrfs_work *work)
{
- return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
+ struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, work);
+
+ bbio->bio.bi_status =
+ btrfs_csum_one_bio(BTRFS_I(bbio->inode), &bbio->bio, 0, 0);
}
/*
@@ -2531,8 +2533,9 @@ blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
/* csum items have already been cloned */
if (btrfs_is_data_reloc_root(bi->root))
goto mapit;
- return btrfs_wq_submit_bio(inode, bio, mirror_num, bio_flags,
- 0, btrfs_submit_bio_start);
+ bbio->mirror_num = mirror_num;
+ btrfs_submit_bio_async(bbio, btrfs_submit_bio_start);
+ return BLK_STS_OK;
}
ret = btrfs_csum_one_bio(bi, bio, 0, 0);
if (ret)
@@ -7803,11 +7806,12 @@ static void __endio_write_update_ordered(struct btrfs_inode *inode,
finish_ordered_fn, uptodate);
}
-static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
- struct bio *bio,
- u64 dio_file_offset)
+static void btrfs_submit_bio_start_direct_io(struct btrfs_work *work)
{
- return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, 1);
+ struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, work);
+
+ bbio->bio.bi_status = btrfs_csum_one_bio(BTRFS_I(bbio->inode),
+ &bbio->bio, bbio->file_offset, 1);
}
static void btrfs_end_dio_bio(struct bio *bio)
@@ -7841,15 +7845,17 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_inode *bi = BTRFS_I(inode);
struct btrfs_dio_private *dip = bio->bi_private;
+ struct btrfs_bio *bbio = btrfs_bio(bio);
blk_status_t ret;
if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
if (!(bi->flags & BTRFS_INODE_NODATASUM)) {
/* See btrfs_submit_data_bio for async submit rules */
- if (async_submit && !atomic_read(&bi->sync_writers))
- return btrfs_wq_submit_bio(inode, bio, 0, 0,
- file_offset,
+ if (async_submit && !atomic_read(&bi->sync_writers)) {
+ btrfs_submit_bio_async(bbio,
btrfs_submit_bio_start_direct_io);
+ return BLK_STS_OK;
+ }
/*
* If we aren't doing async submit, calculate the csum of the
@@ -7860,7 +7866,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
return ret;
}
} else {
- btrfs_bio(bio)->end_io_type = BTRFS_ENDIO_WQ_DATA_READ;
+ bbio->end_io_type = BTRFS_ENDIO_WQ_DATA_READ;
if (!(bi->flags & BTRFS_INODE_NODATASUM)) {
u64 csum_offset;
--
2.30.2