* [PATCH v2] btrfs: extract inlined creation into a dedicated delalloc helper
@ 2026-02-17 3:53 Qu Wenruo
0 siblings, 0 replies; only message in thread
From: Qu Wenruo @ 2026-02-17 3:53 UTC (permalink / raw)
To: linux-btrfs
Currently we call cow_file_range_inline() in different situations, from
regular cow_file_range() to compress_file_range().
This inline creation is a little complex, as the conditions are
different based on whether the inline extent can be compressed.
But on the other hand, inline extent creation shouldn't be so
distributed; we can just have a dedicated branch in
btrfs_run_delalloc_range().
This is especially obvious for compressed inline cases: it makes no
sense to go through the whole complex async extent mechanism just to
inline a single block.
So here we introduce a dedicated run_delalloc_inline() helper, and
remove all inline related handling from cow_file_range() and
compress_file_range().
There is a special update to inode_need_compress(): a new
@check_inline parameter is introduced.
This is to allow inline specific checks to be done inside
run_delalloc_inline(), which requires single block compression, but
other call sites should always reject single block compression.
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
Changelog:
v2:
- Fix a bug exposed in btrfs/344, where the inode_need_compress()
  check allowed a single block to be compressed.
Update inode_need_compress() to accept a new @check_inline parameter,
so that only inode_need_compress() in run_delalloc_inline() will allow
single block to be compressed, meanwhile all other call sites will
reject single block compression.
- Fix a leak of extent_state
---
fs/btrfs/inode.c | 200 ++++++++++++++++++++++-------------------------
1 file changed, 95 insertions(+), 105 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 4523b689711d..baf400847ce8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -74,7 +74,6 @@
#include "delayed-inode.h"
#define COW_FILE_RANGE_KEEP_LOCKED (1UL << 0)
-#define COW_FILE_RANGE_NO_INLINE (1UL << 1)
struct btrfs_iget_args {
u64 ino;
@@ -703,55 +702,6 @@ static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
return ret;
}
-static noinline int cow_file_range_inline(struct btrfs_inode *inode,
- struct folio *locked_folio,
- u64 offset, u64 end,
- size_t compressed_size,
- int compress_type,
- struct folio *compressed_folio,
- bool update_i_size)
-{
- struct extent_state *cached = NULL;
- unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
- u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
- int ret;
-
- if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
- return 1;
-
- btrfs_lock_extent(&inode->io_tree, offset, end, &cached);
- ret = __cow_file_range_inline(inode, size, compressed_size,
- compress_type, compressed_folio,
- update_i_size);
- if (ret > 0) {
- btrfs_unlock_extent(&inode->io_tree, offset, end, &cached);
- return ret;
- }
-
- /*
- * In the successful case (ret == 0 here), cow_file_range will return 1.
- *
- * Quite a bit further up the callstack in extent_writepage(), ret == 1
- * is treated as a short circuited success and does not unlock the folio,
- * so we must do it here.
- *
- * In the failure case, the locked_folio does get unlocked by
- * btrfs_folio_end_all_writers, which asserts that it is still locked
- * at that point, so we must *not* unlock it here.
- *
- * The other two callsites in compress_file_range do not have a
- * locked_folio, so they are not relevant to this logic.
- */
- if (ret == 0)
- locked_folio = NULL;
-
- extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached,
- clear_flags, PAGE_UNLOCK |
- PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
- return ret;
-}
-
struct async_extent {
u64 start;
u64 ram_size;
@@ -797,7 +747,7 @@ static int add_async_extent(struct async_chunk *cow, u64 start, u64 ram_size,
* options, defragmentation, properties or heuristics.
*/
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
- u64 end)
+ u64 end, bool check_inline)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
@@ -812,8 +762,9 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
* and will always fallback to regular write later.
*/
if (end + 1 - start <= fs_info->sectorsize &&
- (start > 0 || end + 1 < inode->disk_i_size))
+ (!check_inline || (start > 0 || end + 1 < inode->disk_i_size)))
return 0;
+
/* Defrag ioctl takes precedence over mount options and properties. */
if (inode->defrag_compress == BTRFS_DEFRAG_DONT_COMPRESS)
return 0;
@@ -936,7 +887,6 @@ static void compress_file_range(struct btrfs_work *work)
container_of(work, struct async_chunk, work);
struct btrfs_inode *inode = async_chunk->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct address_space *mapping = inode->vfs_inode.i_mapping;
struct compressed_bio *cb = NULL;
const u32 min_folio_size = btrfs_min_folio_size(fs_info);
u64 blocksize = fs_info->sectorsize;
@@ -1010,7 +960,7 @@ static void compress_file_range(struct btrfs_work *work)
* been flagged as NOCOMPRESS. This flag can change at any time if we
* discover bad compression ratios.
*/
- if (!inode_need_compress(inode, start, end))
+ if (!inode_need_compress(inode, start, end, false))
goto cleanup_and_bail_uncompressed;
if (0 < inode->defrag_compress && inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) {
@@ -1039,35 +989,6 @@ static void compress_file_range(struct btrfs_work *work)
if (loff)
zero_last_folio(cb);
- /*
- * Try to create an inline extent.
- *
- * If we didn't compress the entire range, try to create an uncompressed
- * inline extent, else a compressed one.
- *
- * Check cow_file_range() for why we don't even try to create inline
- * extent for the subpage case.
- */
- if (total_in < actual_end)
- ret = cow_file_range_inline(inode, NULL, start, end, 0,
- BTRFS_COMPRESS_NONE, NULL, false);
- else
- ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
- compress_type,
- bio_first_folio_all(&cb->bbio.bio), false);
- if (ret <= 0) {
- cleanup_compressed_bio(cb);
- if (ret < 0)
- mapping_set_error(mapping, -EIO);
- return;
- }
- /*
- * If a single block at file offset 0 can not be inlined, fallback
- * to regular writes without marking the file incompressible.
- */
- if (start == 0 && end <= blocksize)
- goto cleanup_and_bail_uncompressed;
-
/*
* We aren't doing an inline extent. Round the compressed size up to a
* block size boundary so the allocator does sane things.
@@ -1492,25 +1413,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
-
- if (!(flags & COW_FILE_RANGE_NO_INLINE)) {
- /* lets try to make an inline extent */
- ret = cow_file_range_inline(inode, locked_folio, start, end, 0,
- BTRFS_COMPRESS_NONE, NULL, false);
- if (ret <= 0) {
- /*
- * We succeeded, return 1 so the caller knows we're done
- * with this page and already handled the IO.
- *
- * If there was an error then cow_file_range_inline() has
- * already done the cleanup.
- */
- if (ret == 0)
- ret = 1;
- goto done;
- }
- }
-
alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
/*
@@ -1588,7 +1490,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
}
extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
-done:
if (done_offset)
*done_offset = end;
return ret;
@@ -1891,7 +1792,7 @@ static int fallback_to_cow(struct btrfs_inode *inode,
* a locked folio, which can race with writeback.
*/
ret = cow_file_range(inode, locked_folio, start, end, NULL,
- COW_FILE_RANGE_NO_INLINE | COW_FILE_RANGE_KEEP_LOCKED);
+ COW_FILE_RANGE_KEEP_LOCKED);
ASSERT(ret != 1);
return ret;
}
@@ -2442,6 +2343,80 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
return false;
}
+/*
+ * Return 0 if an inlined extent is created successfully.
+ * Return <0 if critical error happened.
+ * Return >0 if an inline extent can not be created.
+ */
+static int run_delalloc_inline(struct btrfs_inode *inode, struct folio *locked_folio)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct compressed_bio *cb = NULL;
+ struct extent_state *cached = NULL;
+ const u64 i_size = i_size_read(&inode->vfs_inode);
+ const u32 blocksize = fs_info->sectorsize;
+ int compress_type = fs_info->compress_type;
+ int compress_level = fs_info->compress_level;
+ u32 compressed_size = 0;
+ int ret;
+
+ ASSERT(folio_pos(locked_folio) == 0);
+
+ if (btrfs_inode_can_compress(inode) &&
+ inode_need_compress(inode, 0, blocksize, true)) {
+ if (inode->defrag_compress > 0 &&
+ inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) {
+ compress_type = inode->defrag_compress;
+ compress_level = inode->defrag_compress_level;
+ } else if (inode->prop_compress) {
+ compress_type = inode->prop_compress;
+ }
+ cb = btrfs_compress_bio(inode, 0, blocksize, compress_type, compress_level, 0);
+ if (IS_ERR(cb)) {
+ cb = NULL;
+ /* Just fall back to non-compressed case. */
+ } else {
+ compressed_size = cb->bbio.bio.bi_iter.bi_size;
+ }
+ }
+ if (!can_cow_file_range_inline(inode, 0, i_size, compressed_size)) {
+ if (cb)
+ cleanup_compressed_bio(cb);
+ return 1;
+ }
+
+ btrfs_lock_extent(&inode->io_tree, 0, blocksize - 1, &cached);
+ if (cb)
+ ret = __cow_file_range_inline(inode, i_size, compressed_size, compress_type,
+ bio_first_folio_all(&cb->bbio.bio), false);
+ else
+ ret = __cow_file_range_inline(inode, i_size, 0, BTRFS_COMPRESS_NONE,
+ NULL, false);
+ /*
+ * In the successful case (ret == 0 here), run_delalloc_inline() will return 1.
+ *
+ * Quite a bit further up the callstack in extent_writepage(), ret == 1
+ * is treated as a short circuited success and does not unlock the folio,
+ * so we must do it here.
+ *
+ * For failure case, the @locked_folio does get unlocked by
+ * btrfs_folio_end_lock_bitmap(), so we must *not* unlock it here.
+ *
+ * So if ret == 0, we let extent_clear_unlock_delalloc() to unlock the
+ * folio by passing NULL as @locked_folio.
+ * Otherwise pass @locked_folio as usual.
+ */
+ if (ret == 0)
+ locked_folio = NULL;
+ extent_clear_unlock_delalloc(inode, 0, blocksize - 1, locked_folio, &cached,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
+ EXTENT_DO_ACCOUNTING | EXTENT_LOCKED,
+ PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
+ if (cb)
+ cleanup_compressed_bio(cb);
+ return ret;
+}
+
/*
* Function to process delayed allocation (create CoW) for ranges which are
* being touched for the first time.
@@ -2458,11 +2433,26 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_fol
ASSERT(!(end <= folio_pos(locked_folio) ||
start >= folio_next_pos(locked_folio)));
+ if (start == 0 && end + 1 <= inode->root->fs_info->sectorsize &&
+ end + 1 >= inode->disk_i_size) {
+ int ret;
+
+ ret = run_delalloc_inline(inode, locked_folio);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return 1;
+ /*
+ * Continue regular handling if we can not create an
+ * inlined extent.
+ */
+ }
+
if (should_nocow(inode, start, end))
return run_delalloc_nocow(inode, locked_folio, start, end);
if (btrfs_inode_can_compress(inode) &&
- inode_need_compress(inode, start, end) &&
+ inode_need_compress(inode, start, end, false) &&
run_delalloc_compressed(inode, locked_folio, start, end, wbc))
return 1;
--
2.52.0
^ permalink raw reply related [flat|nested] only message in thread
only message in thread, other threads:[~2026-02-17 3:54 UTC | newest]
Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-02-17 3:53 [PATCH v2] btrfs: extract inlined creation into a dedicated delalloc helper Qu Wenruo
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox