Linux Btrfs filesystem development
* [PATCH] btrfs: Limit size of bios submitted from writeback
@ 2026-04-22  9:42 Jan Kara
  2026-04-22 10:29 ` Qu Wenruo
  0 siblings, 1 reply; 5+ messages in thread
From: Jan Kara @ 2026-04-22  9:42 UTC (permalink / raw)
  To: David Sterba; +Cc: linux-btrfs, Jan Kara

Currently btrfs_writepages() just accumulates as large a bio as possible
(within writeback_control constraints) and then submits it. However, this
can lead to significant latency in writeback IO submission (I have
observed tens of milliseconds) because the submitted bio can easily
exceed a hundred megabytes. This in turn leads to IO pipeline stalls and
reduced throughput.

At the same time, beyond a certain size submitting such a large bio
provides diminishing returns because the block layer splits it
immediately anyway. So compute (an estimate of) the bio size beyond
which we are unlikely to improve performance, and submit the bio for
writeback once we have accumulated that much, to keep the IO pipeline
busy. This improves writeback throughput for sequential writes by about
15% on the test machine I was using.
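
As an illustration (made-up numbers, not from a real machine): for a
RAID0 data chunk over four disks, each reporting a 1MB optimal IO size,
the estimate works out to 4 (data stripes) * 1MB (io_opt) = 4MB, and the
resulting limit is always capped at 64MB.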

Signed-off-by: Jan Kara <jack@suse.cz>
---
 fs/btrfs/disk-io.c   |  7 ++++++
 fs/btrfs/extent_io.c | 10 ++++++++
 fs/btrfs/fs.h        |  1 +
 fs/btrfs/volumes.c   | 54 ++++++++++++++++++++++++++++++++++++++++++++
 fs/btrfs/volumes.h   |  1 +
 5 files changed, 73 insertions(+)

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8a11be02eeb9..f063595d0cee 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3591,6 +3591,13 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 		}
 	}
 
+	ret = btrfs_init_writeback_bio_size(fs_info);
+	if (ret) {
+		btrfs_err(fs_info, "failed to get optimum writeback size: %d",
+			  ret);
+		goto fail_sysfs;
+	}
+
 	btrfs_free_zone_cache(fs_info);
 
 	btrfs_check_active_zone_reservation(fs_info);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ca3e4b99aec2..9c603d59a09b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2555,6 +2555,16 @@ static int extent_write_cache_pages(struct address_space *mapping,
 				break;
 			}
 
+			/*
+			 * If we have accumulated a decent amount of IO, send
+			 * it to the block layer so that IO can run while we
+			 * are accumulating more folios to write.
+			 */
+			if (bio_ctrl->bbio &&
+			    bio_ctrl->bbio->bio.bi_iter.bi_size >=
+			    inode_to_fs_info(inode)->writeback_bio_size)
+				submit_write_bio(bio_ctrl, 0);
+
 			/*
 			 * The filesystem may choose to bump up nr_to_write.
 			 * We have to make sure to honor the new nr_to_write
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index a4758d94b32e..19e02452ab96 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -880,6 +880,7 @@ struct btrfs_fs_info {
 	u32 block_min_order;
 	u32 block_max_order;
 	u32 stripesize;
+	u32 writeback_bio_size;
 	u32 csum_size;
 	u32 csums_per_leaf;
 	u32 csum_type;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a88e68f90564..cb654e990333 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -8179,6 +8179,60 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
 	return ret;
 }
 
+/*
+ * Submit writeback bios of at most 64MB in size to avoid overly long
+ * submission latencies.
+ */
+#define BTRFS_MAX_WB_BIO_SIZE (64 << 20)
+
+int btrfs_init_writeback_bio_size(struct btrfs_fs_info *fs_info)
+{
+	struct rb_node *node;
+	u32 writeback_bio_sectors = 1;
+
+	read_lock(&fs_info->mapping_tree_lock);
+	/*
+	 * For each data chunk compute the size of a bio large enough to
+	 * submit an optimally sized request to each of the chunk's disks,
+	 * and take the maximum over all data chunks.
+	 */
+	for (node = rb_first_cached(&fs_info->mapping_tree); node;
+	     node = rb_next(node)) {
+		struct btrfs_chunk_map *map;
+		unsigned int data_stripes, opt_rq_size = fs_info->sectorsize;
+		int i;
+
+		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
+		if (!(map->type & BTRFS_BLOCK_GROUP_DATA))
+			continue;
+		data_stripes = calc_data_stripes(map->type, map->num_stripes);
+		for (i = 0; i < map->num_stripes; i++) {
+			struct request_queue *queue;
+			unsigned int io_opt;
+
+			if (!map->stripes[i].dev)
+				continue;
+			queue = bdev_get_queue(map->stripes[i].dev->bdev);
+			io_opt = queue_io_opt(queue) ? :
+				queue_max_sectors(queue) << SECTOR_SHIFT;
+			opt_rq_size = max(opt_rq_size, io_opt);
+		}
+		opt_rq_size >>= fs_info->sectorsize_bits;
+		writeback_bio_sectors = max(writeback_bio_sectors,
+					    data_stripes * opt_rq_size);
+	}
+	read_unlock(&fs_info->mapping_tree_lock);
+
+	if (BTRFS_MAX_WB_BIO_SIZE >> fs_info->sectorsize_bits <=
+						writeback_bio_sectors)
+		fs_info->writeback_bio_size = BTRFS_MAX_WB_BIO_SIZE;
+	else
+		fs_info->writeback_bio_size =
+			writeback_bio_sectors << fs_info->sectorsize_bits;
+
+	return 0;
+}
+
 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
 				struct btrfs_device *device)
 {
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 0082c166af91..96904d18f686 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -784,6 +784,7 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
 			struct btrfs_ioctl_get_dev_stats *stats);
 int btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
+int btrfs_init_writeback_bio_size(struct btrfs_fs_info *fs_info);
 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
-- 
2.51.0



Thread overview: 5+ messages
2026-04-22  9:42 [PATCH] btrfs: Limit size of bios submitted from writeback Jan Kara
2026-04-22 10:29 ` Qu Wenruo
2026-04-22 12:49   ` Jan Kara
2026-04-22 21:43     ` Qu Wenruo
2026-04-23  7:57       ` Jan Kara
