* [PATCH v3 1/9] btrfs: introduce lzo_compress_bio() helper
2026-01-27 3:10 [PATCH v3 0/9] btrfs: used compressed_bio structure for read and write Qu Wenruo
@ 2026-01-27 3:10 ` Qu Wenruo
2026-01-27 3:10 ` [PATCH v3 2/9] btrfs: introduce zstd_compress_bio() helper Qu Wenruo
` (7 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Qu Wenruo @ 2026-01-27 3:10 UTC (permalink / raw)
To: linux-btrfs
The new helper has the following enhancements against the existing
lzo_compress_folios()
- Much smaller parameter list
No more shared IN/OUT members, no need to pre-allocate a
compressed_folios[] array.
Just a workspace list header and a compressed_bio pointer.
Everything else can be fetched from that @cb pointer.
- Ready-to-be-submitted compressed bio
Although the caller still needs to do some common work like
rounding up and zeroing the trailing part of the last fs block.
Some work is specific to lzo and is not needed by other
multi-run compression interfaces:
- Need to write a LZO header or segment header
Use the new write_and_queue_folio() helper to do the bio_add_folio()
call and folio switching.
- Need to update the LZO header after compression is done
Use bio_first_folio_all() to grab the first folio and update the header.
- Extra corner case of error handling
This can happen when we have queued part of a folio and hit an error.
In that case those folios will be released by the bio.
Thus we can only release the folio that has no queued part.
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
fs/btrfs/compression.h | 1 +
fs/btrfs/lzo.c | 261 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 262 insertions(+)
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index e0228017e861..4b63d7e4a9ad 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -161,6 +161,7 @@ struct list_head *zlib_get_workspace(struct btrfs_fs_info *fs_info, unsigned int
int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
+int lzo_compress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, const u8 *data_in,
struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index bd5ee82080fa..7314ab500005 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -214,6 +214,159 @@ static int copy_compressed_data_to_page(struct btrfs_fs_info *fs_info,
return 0;
}
+/*
+ * Write data into @out_folio and queue it into @out_bio.
+ *
+ * Return 0 if everything is fine and @total_out will be increased.
+ * Return <0 for error.
+ *
+ * The @out_folio can be NULL after a full folio is queued.
+ * Thus the caller should check and allocate a new folio when needed.
+ */
+static int write_and_queue_folio(struct bio *out_bio,
+ struct folio **out_folio,
+ u32 *total_out, u32 write_len)
+{
+ const u32 fsize = folio_size(*out_folio);
+ const u32 foffset = offset_in_folio(*out_folio, *total_out);
+
+ ASSERT(out_folio && *out_folio);
+ /* Should not cross folio boundary. */
+ ASSERT(foffset + write_len <= fsize);
+
+ /* We can not use bio_add_folio_nofail() which doesn't do any merge. */
+ if (!bio_add_folio(out_bio, *out_folio, write_len, foffset)) {
+ /*
+ * We have allocated a bio that has BTRFS_MAX_COMPRESSED_PAGES
+ * vecs, and all ranges inside the same folio should have been
+ * merged.
+ * If bio_add_folio() still failed, that means we have reached
+ * the bvec limits.
+ *
+ * This should only happen at the beginning of a folio, and
+ * caller is responsible for releasing the folio, since it's
+ * not yet queued into the bio.
+ */
+ ASSERT(IS_ALIGNED(*total_out, fsize));
+ return -E2BIG;
+ }
+
+ *total_out += write_len;
+ /*
+ * The full folio has been filled and queued, reset @out_folio to NULL,
+ * so that error handling is fully handled by the bio.
+ */
+ if (IS_ALIGNED(*total_out, fsize))
+ *out_folio = NULL;
+ return 0;
+}
+
+/*
+ * Will do:
+ *
+ * - Write a segment header into the destination
+ * - Copy the compressed buffer into the destination
+ * - Make sure we have enough space in the last sector to fit a segment header
+ * If not, we will pad at most (LZO_LEN (4)) - 1 bytes of zeros.
+ * - If a full folio is filled, it will be queued into @out_bio, and @out_folio
+ * will be updated.
+ *
+ * Will allocate a new folio when needed.
+ *
+ * @out_bio: The bio that will contain all the compressed data.
+ * @compressed_data: The compressed data of this segment.
+ * @compressed_size: The size of the compressed data.
+ * @out_folio: The current output folio, will be updated if a new
+ * folio is allocated.
+ * @total_out: The total bytes of current output.
+ * @max_out: The maximum size of the compressed data.
+ */
+static int copy_compressed_data_to_bio(struct btrfs_fs_info *fs_info,
+ struct bio *out_bio,
+ const char *compressed_data,
+ size_t compressed_size,
+ struct folio **out_folio,
+ u32 *total_out, u32 max_out)
+{
+ const u32 sectorsize = fs_info->sectorsize;
+ const u32 sectorsize_bits = fs_info->sectorsize_bits;
+ const u32 fsize = btrfs_min_folio_size(fs_info);
+ const u32 old_size = out_bio->bi_iter.bi_size;
+ u32 copy_start;
+ u32 sector_bytes_left;
+ char *kaddr;
+ int ret;
+
+ ASSERT(out_folio);
+
+ /* There should be at least a lzo header queued. */
+ ASSERT(old_size);
+ ASSERT(old_size == *total_out);
+
+ /*
+ * We never allow a segment header crossing sector boundary, previous
+ * run should ensure we have enough space left inside the sector.
+ */
+ ASSERT((old_size >> sectorsize_bits) == (old_size + LZO_LEN - 1) >> sectorsize_bits);
+
+ if (!*out_folio) {
+ *out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (!*out_folio)
+ return -ENOMEM;
+ }
+
+ /* Write the segment header first. */
+ kaddr = kmap_local_folio(*out_folio, offset_in_folio(*out_folio, *total_out));
+ write_compress_length(kaddr, compressed_size);
+ kunmap_local(kaddr);
+ ret = write_and_queue_folio(out_bio, out_folio, total_out, LZO_LEN);
+ if (ret < 0)
+ return ret;
+
+ copy_start = *total_out;
+
+ /* Copy compressed data. */
+ while (*total_out - copy_start < compressed_size) {
+ u32 copy_len = min_t(u32, sectorsize - *total_out % sectorsize,
+ copy_start + compressed_size - *total_out);
+ u32 foffset = *total_out & (fsize - 1);
+
+ /* With the range copied, we're larger than the original range. */
+ if (((*total_out + copy_len) >> sectorsize_bits) >=
+ max_out >> sectorsize_bits)
+ return -E2BIG;
+
+ if (!*out_folio) {
+ *out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (!*out_folio)
+ return -ENOMEM;
+ }
+
+ kaddr = kmap_local_folio(*out_folio, foffset);
+ memcpy(kaddr, compressed_data + *total_out - copy_start, copy_len);
+ kunmap_local(kaddr);
+ ret = write_and_queue_folio(out_bio, out_folio, total_out, copy_len);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Check if we can fit the next segment header into the remaining space
+ * of the sector.
+ */
+ sector_bytes_left = round_up(*total_out, sectorsize) - *total_out;
+ if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
+ return 0;
+
+ ASSERT(*out_folio);
+
+ /* The remaining size is not enough, pad it with zeros */
+ folio_zero_range(*out_folio, offset_in_folio(*out_folio, *total_out),
+ sector_bytes_left);
+ return write_and_queue_folio(out_bio, out_folio, total_out,
+ sector_bytes_left);
+}
+
int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out)
@@ -310,6 +463,114 @@ int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
return ret;
}
+int lzo_compress_bio(struct list_head *ws, struct compressed_bio *cb)
+{
+ struct btrfs_inode *inode = cb->bbio.inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+ struct bio *bio = &cb->bbio.bio;
+ const u64 start = cb->start;
+ const u32 len = cb->len;
+ const u32 sectorsize = fs_info->sectorsize;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ struct folio *folio_in = NULL;
+ struct folio *folio_out = NULL;
+ char *sizes_ptr;
+ int ret = 0;
+ /* Points to the file offset of input data */
+ u64 cur_in = start;
+ /* Points to the current output byte */
+ u32 total_out = 0;
+
+ ASSERT(bio->bi_iter.bi_size == 0);
+ ASSERT(len);
+
+ folio_out = btrfs_alloc_compr_folio(fs_info);
+ if (!folio_out)
+ return -ENOMEM;
+
+ /* Queue a segment header first. */
+ ret = write_and_queue_folio(bio, &folio_out, &total_out, LZO_LEN);
+ /* The first header should not fail. */
+ ASSERT(ret == 0);
+
+ while (cur_in < start + len) {
+ char *data_in;
+ const u32 sectorsize_mask = sectorsize - 1;
+ u32 sector_off = (cur_in - start) & sectorsize_mask;
+ u32 in_len;
+ size_t out_len;
+
+ /* Get the input page first */
+ if (!folio_in) {
+ ret = btrfs_compress_filemap_get_folio(mapping, cur_in, &folio_in);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* Compress at most one sector of data each time */
+ in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
+ ASSERT(in_len);
+ data_in = kmap_local_folio(folio_in, offset_in_folio(folio_in, cur_in));
+ ret = lzo1x_1_compress(data_in, in_len,
+ workspace->cbuf, &out_len,
+ workspace->mem);
+ kunmap_local(data_in);
+ if (unlikely(ret < 0)) {
+ /* lzo1x_1_compress never fails. */
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = copy_compressed_data_to_bio(fs_info, bio, workspace->cbuf, out_len,
+ &folio_out, &total_out, len);
+ if (ret < 0)
+ goto out;
+
+ cur_in += in_len;
+
+ /*
+ * Check if we're making it bigger after two sectors. And if
+ * it is so, give up.
+ */
+ if (cur_in - start > sectorsize * 2 && cur_in - start < total_out) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ /* Check if we have reached input folio boundary. */
+ if (IS_ALIGNED(cur_in, min_folio_size)) {
+ folio_put(folio_in);
+ folio_in = NULL;
+ }
+ }
+ /*
+ * The last folio is already queued. Bio is responsible for
+ * freeing those folios now.
+ */
+ folio_out = NULL;
+
+ /* Store the size of all chunks of compressed data */
+ sizes_ptr = kmap_local_folio(bio_first_folio_all(bio), 0);
+ write_compress_length(sizes_ptr, total_out);
+ kunmap_local(sizes_ptr);
+out:
+ /*
+ * We can only free the folio that has no part queued into the bio.
+ *
+ * As any folio that is already queued into bio will be released by
+ * the endio function of bio.
+ */
+ if (folio_out && IS_ALIGNED(total_out, min_folio_size)) {
+ btrfs_free_compr_folio(folio_out);
+ folio_out = NULL;
+ }
+ if (folio_in)
+ folio_put(folio_in);
+ return ret;
+}
+
static struct folio *get_current_folio(struct compressed_bio *cb, struct folio_iter *fi,
u32 *cur_folio_index, u32 cur_in)
{
--
2.52.0
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH v3 2/9] btrfs: introduce zstd_compress_bio() helper
2026-01-27 3:10 [PATCH v3 0/9] btrfs: used compressed_bio structure for read and write Qu Wenruo
2026-01-27 3:10 ` [PATCH v3 1/9] btrfs: introduce lzo_compress_bio() helper Qu Wenruo
@ 2026-01-27 3:10 ` Qu Wenruo
2026-01-27 3:10 ` [PATCH v3 3/9] btrfs: introduce zlib_compress_bio() helper Qu Wenruo
` (6 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Qu Wenruo @ 2026-01-27 3:10 UTC (permalink / raw)
To: linux-btrfs
The new helper has the following enhancements against the existing
zstd_compress_folios()
- Much smaller parameter list
No more shared IN/OUT members, no need to pre-allocate a
compressed_folios[] array.
Just a workspace and compressed_bio pointer, everything we need can be
extracted from that @cb pointer.
- Ready-to-be-submitted compressed bio
Although the caller still needs to do some common work like
rounding up and zeroing the trailing part of the last fs block.
Overall the workflow is the same as zstd_compress_folios(), but with
some minor changes:
- @start/@len is now constant
For the current input file offset, use @start + @tot_in instead.
The original change of @start and @len makes it pretty hard to know
what value we're really comparing to.
- No more @cur_len
It's only utilized when switching input buffer.
Directly use btrfs_calc_input_length() instead.
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
fs/btrfs/compression.h | 1 +
fs/btrfs/zstd.c | 185 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 186 insertions(+)
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 4b63d7e4a9ad..454c8e0461b4 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -172,6 +172,7 @@ void lzo_free_workspace(struct list_head *ws);
int zstd_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
+int zstd_compress_bio(struct list_head *ws, struct compressed_bio *cb);
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zstd_decompress(struct list_head *ws, const u8 *data_in,
struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 7fad1e299c7a..ce204a9300b5 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -585,6 +585,191 @@ int zstd_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
return ret;
}
+int zstd_compress_bio(struct list_head *ws, struct compressed_bio *cb)
+{
+ struct btrfs_inode *inode = cb->bbio.inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ struct bio *bio = &cb->bbio.bio;
+ zstd_cstream *stream;
+ int ret = 0;
+ struct folio *in_folio = NULL; /* The current folio to read. */
+ struct folio *out_folio = NULL; /* The current folio to write to. */
+ unsigned long tot_in = 0;
+ unsigned long tot_out = 0;
+ const u64 start = cb->start;
+ const u32 len = cb->len;
+ const u64 end = start + len;
+ const u32 blocksize = fs_info->sectorsize;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
+
+ workspace->params = zstd_get_btrfs_parameters(workspace->req_level, len);
+
+ /* Initialize the stream */
+ stream = zstd_init_cstream(&workspace->params, len, workspace->mem,
+ workspace->size);
+ if (unlikely(!stream)) {
+ btrfs_err(fs_info,
+ "zstd compression init level %d failed, root %llu inode %llu offset %llu",
+ workspace->req_level, btrfs_root_id(inode->root),
+ btrfs_ino(inode), start);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* map in the first page of input data */
+ ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
+ if (ret < 0)
+ goto out;
+ workspace->in_buf.src = kmap_local_folio(in_folio, offset_in_folio(in_folio, start));
+ workspace->in_buf.pos = 0;
+ workspace->in_buf.size = btrfs_calc_input_length(in_folio, end, start);
+
+ /* Allocate and map in the output buffer */
+ out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (out_folio == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ workspace->out_buf.dst = folio_address(out_folio);
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = min_folio_size;
+
+ while (1) {
+ size_t ret2;
+
+ ret2 = zstd_compress_stream(stream, &workspace->out_buf,
+ &workspace->in_buf);
+ if (unlikely(zstd_is_error(ret2))) {
+ btrfs_warn(fs_info,
+"zstd compression level %d failed, error %d root %llu inode %llu offset %llu",
+ workspace->req_level, zstd_get_error_code(ret2),
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ start + tot_in);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Check to see if we are making it bigger */
+ if (tot_in + workspace->in_buf.pos > blocksize * 2 &&
+ tot_in + workspace->in_buf.pos < tot_out + workspace->out_buf.pos) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ /* Check if we need more output space */
+ if (workspace->out_buf.pos >= workspace->out_buf.size) {
+ tot_out += min_folio_size;
+ if (tot_out >= len) {
+ ret = -E2BIG;
+ goto out;
+ }
+ /* Queue the current folio into the bio. */
+ if (!bio_add_folio(bio, out_folio, folio_size(out_folio), 0)) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (out_folio == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ workspace->out_buf.dst = folio_address(out_folio);
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = min_folio_size;
+ }
+
+ /* We've reached the end of the input */
+ if (tot_in + workspace->in_buf.pos >= len) {
+ tot_in += workspace->in_buf.pos;
+ break;
+ }
+
+ /* Check if we need more input */
+ if (workspace->in_buf.pos >= workspace->in_buf.size) {
+ u64 cur;
+
+ tot_in += workspace->in_buf.size;
+ cur = start + tot_in;
+
+ kunmap_local(workspace->in_buf.src);
+ workspace->in_buf.src = NULL;
+ folio_put(in_folio);
+
+ ret = btrfs_compress_filemap_get_folio(mapping, cur, &in_folio);
+ if (ret < 0)
+ goto out;
+ workspace->in_buf.src = kmap_local_folio(in_folio,
+ offset_in_folio(in_folio, cur));
+ workspace->in_buf.pos = 0;
+ workspace->in_buf.size = btrfs_calc_input_length(in_folio, end, cur);
+ }
+ }
+ while (1) {
+ size_t ret2;
+
+ ret2 = zstd_end_stream(stream, &workspace->out_buf);
+ if (unlikely(zstd_is_error(ret2))) {
+ btrfs_err(fs_info,
+"zstd compression end level %d failed, error %d root %llu inode %llu offset %llu",
+ workspace->req_level, zstd_get_error_code(ret2),
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ start + tot_in);
+ ret = -EIO;
+ goto out;
+ }
+ /* Queue the remaining part of the output folio into bio. */
+ if (ret2 == 0) {
+ tot_out += workspace->out_buf.pos;
+ if (tot_out >= len) {
+ ret = -E2BIG;
+ goto out;
+ }
+ if (!bio_add_folio(bio, out_folio, workspace->out_buf.pos, 0)) {
+ ret = -E2BIG;
+ goto out;
+ }
+ out_folio = NULL;
+ break;
+ }
+ tot_out += min_folio_size;
+ if (tot_out >= len) {
+ ret = -E2BIG;
+ goto out;
+ }
+ if (!bio_add_folio(bio, out_folio, folio_size(out_folio), 0)) {
+ ret = -E2BIG;
+ goto out;
+ }
+ out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (out_folio == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ workspace->out_buf.dst = folio_address(out_folio);
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = min_folio_size;
+ }
+
+ if (tot_out >= tot_in) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ ret = 0;
+ ASSERT(tot_out == bio->bi_iter.bi_size);
+out:
+ if (out_folio)
+ btrfs_free_compr_folio(out_folio);
+ if (workspace->in_buf.src) {
+ kunmap_local(workspace->in_buf.src);
+ folio_put(in_folio);
+ }
+ return ret;
+}
+
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
--
2.52.0
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH v3 3/9] btrfs: introduce zlib_compress_bio() helper
2026-01-27 3:10 [PATCH v3 0/9] btrfs: used compressed_bio structure for read and write Qu Wenruo
2026-01-27 3:10 ` [PATCH v3 1/9] btrfs: introduce lzo_compress_bio() helper Qu Wenruo
2026-01-27 3:10 ` [PATCH v3 2/9] btrfs: introduce zstd_compress_bio() helper Qu Wenruo
@ 2026-01-27 3:10 ` Qu Wenruo
2026-01-27 3:10 ` [PATCH v3 4/9] btrfs: introduce btrfs_compress_bio() helper Qu Wenruo
` (5 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Qu Wenruo @ 2026-01-27 3:10 UTC (permalink / raw)
To: linux-btrfs
The new helper has the following enhancements against the existing
zlib_compress_folios()
- Much smaller parameter list
No more shared IN/OUT members, no need to pre-allocate a
compressed_folios[] array.
Just a workspace and compressed_bio pointer, everything we need can be
extracted from that @cb pointer.
- Ready-to-be-submitted compressed bio
Although the caller still needs to do some common work like
rounding up and zeroing the trailing part of the last fs block.
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
fs/btrfs/compression.h | 1 +
fs/btrfs/zlib.c | 193 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 194 insertions(+)
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 454c8e0461b4..eee4190efa02 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -150,6 +150,7 @@ int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
int zlib_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
+int zlib_compress_bio(struct list_head *ws, struct compressed_bio *cb);
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zlib_decompress(struct list_head *ws, const u8 *data_in,
struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index a004aa4ee9e2..6f2a43f06b5c 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -334,6 +334,199 @@ int zlib_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
return ret;
}
+int zlib_compress_bio(struct list_head *ws, struct compressed_bio *cb)
+{
+ struct btrfs_inode *inode = cb->bbio.inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ struct bio *bio = &cb->bbio.bio;
+ u64 start = cb->start;
+ u32 len = cb->len;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
+ int ret;
+ char *data_in = NULL;
+ char *cfolio_out;
+ struct folio *in_folio = NULL;
+ struct folio *out_folio = NULL;
+ const u32 blocksize = fs_info->sectorsize;
+ const u64 orig_end = start + len;
+
+ ret = zlib_deflateInit(&workspace->strm, workspace->level);
+ if (unlikely(ret != Z_OK)) {
+ btrfs_err(fs_info,
+ "zlib compression init failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode), start);
+ ret = -EIO;
+ goto out;
+ }
+
+ workspace->strm.total_in = 0;
+ workspace->strm.total_out = 0;
+
+ out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (out_folio == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ cfolio_out = folio_address(out_folio);
+
+ workspace->strm.next_in = workspace->buf;
+ workspace->strm.avail_in = 0;
+ workspace->strm.next_out = cfolio_out;
+ workspace->strm.avail_out = min_folio_size;
+
+ while (workspace->strm.total_in < len) {
+ /*
+ * Get next input pages and copy the contents to
+ * the workspace buffer if required.
+ */
+ if (workspace->strm.avail_in == 0) {
+ unsigned long bytes_left = len - workspace->strm.total_in;
+ unsigned int copy_length = min(bytes_left, workspace->buf_size);
+
+ /*
+ * For s390 hardware accelerated zlib, and our folio is smaller
+ * than the copy_length, we need to fill the buffer so that
+ * we can take full advantage of hardware acceleration.
+ */
+ if (need_special_buffer(fs_info)) {
+ ret = copy_data_into_buffer(mapping, workspace,
+ start, copy_length);
+ if (ret < 0)
+ goto out;
+ start += copy_length;
+ workspace->strm.next_in = workspace->buf;
+ workspace->strm.avail_in = copy_length;
+ } else {
+ unsigned int cur_len;
+
+ if (data_in) {
+ kunmap_local(data_in);
+ folio_put(in_folio);
+ data_in = NULL;
+ }
+ ret = btrfs_compress_filemap_get_folio(mapping,
+ start, &in_folio);
+ if (ret < 0)
+ goto out;
+ cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
+ data_in = kmap_local_folio(in_folio,
+ offset_in_folio(in_folio, start));
+ start += cur_len;
+ workspace->strm.next_in = data_in;
+ workspace->strm.avail_in = cur_len;
+ }
+ }
+
+ ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
+ if (unlikely(ret != Z_OK)) {
+ btrfs_warn(fs_info,
+ "zlib compression failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+ start);
+ zlib_deflateEnd(&workspace->strm);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* we're making it bigger, give up */
+ if (workspace->strm.total_in > blocksize * 2 &&
+ workspace->strm.total_in <
+ workspace->strm.total_out) {
+ ret = -E2BIG;
+ goto out;
+ }
+ if (workspace->strm.total_out >= len) {
+ ret = -E2BIG;
+ goto out;
+ }
+ /* Queue the full folio and allocate a new one. */
+ if (workspace->strm.avail_out == 0) {
+ if (!bio_add_folio(bio, out_folio, folio_size(out_folio), 0)) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (out_folio == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ cfolio_out = folio_address(out_folio);
+ workspace->strm.avail_out = min_folio_size;
+ workspace->strm.next_out = cfolio_out;
+ }
+ /* we're all done */
+ if (workspace->strm.total_in >= len)
+ break;
+ }
+ workspace->strm.avail_in = 0;
+ /*
+ * Call deflate with Z_FINISH flush parameter providing more output
+ * space but no more input data, until it returns with Z_STREAM_END.
+ */
+ while (ret != Z_STREAM_END) {
+ ret = zlib_deflate(&workspace->strm, Z_FINISH);
+ if (ret == Z_STREAM_END)
+ break;
+ if (unlikely(ret != Z_OK && ret != Z_BUF_ERROR)) {
+ zlib_deflateEnd(&workspace->strm);
+ ret = -EIO;
+ goto out;
+ } else if (workspace->strm.avail_out == 0) {
+ if (workspace->strm.total_out >= len) {
+ ret = -E2BIG;
+ goto out;
+ }
+ if (!bio_add_folio(bio, out_folio, folio_size(out_folio), 0)) {
+ ret = -E2BIG;
+ goto out;
+ }
+ /* Get another folio for the stream end. */
+ out_folio = btrfs_alloc_compr_folio(fs_info);
+ if (out_folio == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ cfolio_out = folio_address(out_folio);
+ workspace->strm.avail_out = min_folio_size;
+ workspace->strm.next_out = cfolio_out;
+ }
+ }
+ /* Queue the remaining part of the folio. */
+ if (workspace->strm.total_out > bio->bi_iter.bi_size) {
+ u32 cur_len = offset_in_folio(out_folio, workspace->strm.total_out);
+
+ if (!bio_add_folio(bio, out_folio, cur_len, 0)) {
+ ret = -E2BIG;
+ goto out;
+ }
+ } else {
+ /* The last folio didn't get utilized. */
+ btrfs_free_compr_folio(out_folio);
+ }
+ out_folio = NULL;
+ ASSERT(bio->bi_iter.bi_size == workspace->strm.total_out);
+ zlib_deflateEnd(&workspace->strm);
+
+ if (workspace->strm.total_out >= workspace->strm.total_in) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (out_folio)
+ btrfs_free_compr_folio(out_folio);
+ if (data_in) {
+ kunmap_local(data_in);
+ folio_put(in_folio);
+ }
+
+ return ret;
+}
+
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
--
2.52.0
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH v3 4/9] btrfs: introduce btrfs_compress_bio() helper
2026-01-27 3:10 [PATCH v3 0/9] btrfs: used compressed_bio structure for read and write Qu Wenruo
` (2 preceding siblings ...)
2026-01-27 3:10 ` [PATCH v3 3/9] btrfs: introduce zlib_compress_bio() helper Qu Wenruo
@ 2026-01-27 3:10 ` Qu Wenruo
2026-01-27 3:10 ` [PATCH v3 5/9] btrfs: switch to btrfs_compress_bio() interface for compressed writes Qu Wenruo
` (4 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Qu Wenruo @ 2026-01-27 3:10 UTC (permalink / raw)
To: linux-btrfs
The helper will allocate a new compressed_bio, do the compression, and
return it to the caller.
This greatly simplifies the compression path, as we no longer need to
allocate a folio array thus no extra error path, furthermore the
compressed bio structure can be utilized for submission with very minor
modifications (like rounding up the bi_size and populate the bi_sector).
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
fs/btrfs/compression.c | 68 ++++++++++++++++++++++++++++++++++++++++++
fs/btrfs/compression.h | 13 ++++++++
2 files changed, 81 insertions(+)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 4c6298cf01b2..942b85bcacbe 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1064,6 +1064,74 @@ int btrfs_compress_folios(unsigned int type, int level, struct btrfs_inode *inod
return ret;
}
+/*
+ * Given an address space and start and length, compress the page cache
+ * contents into @cb.
+ *
+ * @type_level is encoded algorithm and level, where level 0 means whatever
+ * default the algorithm chooses and is opaque here;
+ * - compression algo are 0-3
+ * - the level are bits 4-7
+ *
+ * @cb->bbio.bio.bi_iter.bi_size will indicate the compressed data size.
+ * The bi_size may not be sectorsize aligned, thus the caller still need
+ * to do the round up before submission.
+ *
+ * This function will allocate compressed folios with btrfs_alloc_compr_folio(),
+ * thus callers must make sure the endio function and error handling are using
+ * btrfs_free_compr_folio() to release those folios.
+ * This is already done in end_bbio_compressed_write() and cleanup_compressed_bio().
+ */
+struct compressed_bio *btrfs_compress_bio(struct btrfs_inode *inode,
+ u64 start, u32 len, unsigned int type,
+ int level, blk_opf_t write_flags)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct list_head *workspace;
+ struct compressed_bio *cb;
+ int ret;
+
+ cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE | write_flags,
+ end_bbio_compressed_write);
+ cb->start = start;
+ cb->len = len;
+ cb->writeback = true;
+ cb->compress_type = type;
+
+ level = btrfs_compress_set_level(type, level);
+ workspace = get_workspace(fs_info, type, level);
+ switch (type) {
+ case BTRFS_COMPRESS_ZLIB:
+ ret = zlib_compress_bio(workspace, cb);
+ break;
+ case BTRFS_COMPRESS_LZO:
+ ret = lzo_compress_bio(workspace, cb);
+ break;
+ case BTRFS_COMPRESS_ZSTD:
+ ret = zstd_compress_bio(workspace, cb);
+ break;
+ case BTRFS_COMPRESS_NONE:
+ default:
+ /*
+ * This can happen when compression races with remount setting
+ * it to 'no compress', while caller doesn't call
+ * inode_need_compress() to check if we really need to
+ * compress.
+ *
+ * Not a big deal, just need to inform caller that we
+ * haven't allocated any pages yet.
+ */
+ ret = -E2BIG;
+ }
+
+ put_workspace(fs_info, type, workspace);
+ if (ret < 0) {
+ cleanup_compressed_bio(cb);
+ return ERR_PTR(ret);
+ }
+ return cb;
+}
+
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index eee4190efa02..fd0cce5d07cf 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -146,6 +146,19 @@ int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end);
int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
struct folio **in_folio_ret);
+struct compressed_bio *btrfs_compress_bio(struct btrfs_inode *inode,
+ u64 start, u32 len, unsigned int type,
+ int level, blk_opf_t write_flags);
+
+static inline void cleanup_compressed_bio(struct compressed_bio *cb)
+{
+ struct bio *bio = &cb->bbio.bio;
+ struct folio_iter fi;
+
+ bio_for_each_folio_all(fi, bio)
+ btrfs_free_compr_folio(fi.folio);
+ bio_put(bio);
+}
int zlib_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
u64 start, struct folio **folios, unsigned long *out_folios,
--
2.52.0
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH v3 5/9] btrfs: switch to btrfs_compress_bio() interface for compressed writes
2026-01-27 3:10 [PATCH v3 0/9] btrfs: used compressed_bio structure for read and write Qu Wenruo
` (3 preceding siblings ...)
2026-01-27 3:10 ` [PATCH v3 4/9] btrfs: introduce btrfs_compress_bio() helper Qu Wenruo
@ 2026-01-27 3:10 ` Qu Wenruo
2026-01-27 3:10 ` [PATCH v3 6/9] btrfs: remove the old btrfs_compress_folios() infrastructures Qu Wenruo
` (3 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Qu Wenruo @ 2026-01-27 3:10 UTC (permalink / raw)
To: linux-btrfs
This switch has the following benefits:
- A single structure to handle all compression
No more extra members like compressed_folios[] nor compress_type; all
those members are replaced by a single compressed_bio pointer.
This means the structure of async_extent is much smaller.
- Simpler error handling
A single cleanup_compressed_bio() will handle everything, no extra
compressed_folios[] array to bother.
Some special notes:
- Compressed folios releasing
Now we use a bio_for_each_folio_all() loop to release the folios of the
bio. This will work for both the old compressed_folios[] array and the
new pure bio method.
For old compressed_folios[], all folios of that array are queued into
the bio, thus releasing the folios from the bio is the same as
releasing each folio of that array. We just need to be sure there is no
double releasing from both the array and the bio.
For the new pure bio method, that array is NULL, just usual folio
releasing of the bio.
The only extra note is for end_bbio_compressed_read(), as the folios
are allocated using btrfs_alloc_folio_array(), thus the folios should
only be released by regular folio_put(), not btrfs_free_compr_folio().
- Rounding up the bio to block size
We cannot simply increase bi_size, as that will not increase the
length of the last bvec.
Thus we have to properly add the last part into the bio.
This will be done by the helper, round_up_last_block().
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
fs/btrfs/compression.c | 16 ++--
fs/btrfs/inode.c | 161 +++++++++++++++++++----------------------
2 files changed, 83 insertions(+), 94 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 942b85bcacbe..1d4e7c7c25c3 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -155,13 +155,6 @@ static int compression_decompress(int type, struct list_head *ws,
}
}
-static void btrfs_free_compressed_folios(struct compressed_bio *cb)
-{
- for (unsigned int i = 0; i < cb->nr_folios; i++)
- btrfs_free_compr_folio(cb->compressed_folios[i]);
- kfree(cb->compressed_folios);
-}
-
static int btrfs_decompress_bio(struct compressed_bio *cb);
/*
@@ -270,12 +263,14 @@ static void end_bbio_compressed_read(struct btrfs_bio *bbio)
{
struct compressed_bio *cb = to_compressed_bio(bbio);
blk_status_t status = bbio->bio.bi_status;
+ struct folio_iter fi;
if (!status)
status = errno_to_blk_status(btrfs_decompress_bio(cb));
- btrfs_free_compressed_folios(cb);
btrfs_bio_end_io(cb->orig_bbio, status);
+ bio_for_each_folio_all(fi, &bbio->bio)
+ folio_put(fi.folio);
bio_put(&bbio->bio);
}
@@ -326,6 +321,7 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb)
static void end_bbio_compressed_write(struct btrfs_bio *bbio)
{
struct compressed_bio *cb = to_compressed_bio(bbio);
+ struct folio_iter fi;
btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
cb->bbio.bio.bi_status == BLK_STS_OK);
@@ -333,7 +329,9 @@ static void end_bbio_compressed_write(struct btrfs_bio *bbio)
if (cb->writeback)
end_compressed_writeback(cb);
/* Note, our inode could be gone now. */
- btrfs_free_compressed_folios(cb);
+ bio_for_each_folio_all(fi, &bbio->bio)
+ btrfs_free_compr_folio(fi.folio);
+ kfree(cb->compressed_folios);
bio_put(&cb->bbio.bio);
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 10609b8199a0..aafffb72dd0e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -755,10 +755,7 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode,
struct async_extent {
u64 start;
u64 ram_size;
- u64 compressed_size;
- struct folio **folios;
- unsigned long nr_folios;
- int compress_type;
+ struct compressed_bio *cb;
struct list_head list;
};
@@ -779,24 +776,19 @@ struct async_cow {
struct async_chunk chunks[];
};
-static noinline int add_async_extent(struct async_chunk *cow,
- u64 start, u64 ram_size,
- u64 compressed_size,
- struct folio **folios,
- unsigned long nr_folios,
- int compress_type)
+static int add_async_extent(struct async_chunk *cow,
+ u64 start, u64 ram_size,
+ struct compressed_bio *cb)
{
struct async_extent *async_extent;
async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
if (!async_extent)
return -ENOMEM;
+ ASSERT(ram_size < U32_MAX);
async_extent->start = start;
async_extent->ram_size = ram_size;
- async_extent->compressed_size = compressed_size;
- async_extent->folios = folios;
- async_extent->nr_folios = nr_folios;
- async_extent->compress_type = compress_type;
+ async_extent->cb = cb;
list_add_tail(&async_extent->list, &cow->extents);
return 0;
}
@@ -870,6 +862,36 @@ static int extent_range_clear_dirty_for_io(struct btrfs_inode *inode, u64 start,
return ret;
}
+static void zero_last_folio(struct compressed_bio *cb)
+{
+ struct bio *bio = &cb->bbio.bio;
+ struct bio_vec *bvec = bio_last_bvec_all(bio);
+ phys_addr_t last_paddr = page_to_phys(bvec->bv_page) + bvec->bv_offset + bvec->bv_len - 1;
+ struct folio *last_folio = page_folio(phys_to_page(last_paddr));
+ const u32 bio_size = bio->bi_iter.bi_size;
+ const u32 foffset = offset_in_folio(last_folio, bio_size);
+
+ folio_zero_range(last_folio, foffset, folio_size(last_folio) - foffset);
+}
+
+static void round_up_last_block(struct compressed_bio *cb, u32 blocksize)
+{
+ struct bio *bio = &cb->bbio.bio;
+ struct bio_vec *bvec = bio_last_bvec_all(bio);
+ phys_addr_t last_paddr = page_to_phys(bvec->bv_page) + bvec->bv_offset + bvec->bv_len - 1;
+ struct folio *last_folio = page_folio(phys_to_page(last_paddr));
+ const u32 bio_size = bio->bi_iter.bi_size;
+ const u32 foffset = offset_in_folio(last_folio, bio_size);
+ bool ret;
+
+ if (IS_ALIGNED(bio_size, blocksize))
+ return;
+
+ ret = bio_add_folio(bio, last_folio, round_up(foffset, blocksize) - foffset, foffset);
+ /* The remaining part should be merged thus never fail. */
+ ASSERT(ret);
+}
+
/*
* Work queue call back to started compression on a file and pages.
*
@@ -890,20 +912,18 @@ static void compress_file_range(struct btrfs_work *work)
struct btrfs_inode *inode = async_chunk->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct address_space *mapping = inode->vfs_inode.i_mapping;
- const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+ struct compressed_bio *cb = NULL;
const u32 min_folio_size = btrfs_min_folio_size(fs_info);
u64 blocksize = fs_info->sectorsize;
u64 start = async_chunk->start;
u64 end = async_chunk->end;
u64 actual_end;
u64 i_size;
+ u32 cur_len;
int ret = 0;
- struct folio **folios = NULL;
- unsigned long nr_folios;
unsigned long total_compressed = 0;
unsigned long total_in = 0;
unsigned int loff;
- int i;
int compress_type = fs_info->compress_type;
int compress_level = fs_info->compress_level;
@@ -942,9 +962,10 @@ static void compress_file_range(struct btrfs_work *work)
barrier();
actual_end = min_t(u64, i_size, end + 1);
again:
- folios = NULL;
- nr_folios = (end >> min_folio_shift) - (start >> min_folio_shift) + 1;
- nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED >> min_folio_shift);
+ total_in = 0;
+ cur_len = min(end + 1 - start, BTRFS_MAX_UNCOMPRESSED);
+ ret = 0;
+ cb = NULL;
/*
* we don't want to send crud past the end of i_size through
@@ -959,10 +980,6 @@ static void compress_file_range(struct btrfs_work *work)
if (actual_end <= start)
goto cleanup_and_bail_uncompressed;
- total_compressed = min_t(unsigned long, actual_end - start, BTRFS_MAX_UNCOMPRESSED);
- total_in = 0;
- ret = 0;
-
/*
* We do compression for mount -o compress and when the inode has not
* been flagged as NOCOMPRESS. This flag can change at any time if we
@@ -971,15 +988,6 @@ static void compress_file_range(struct btrfs_work *work)
if (!inode_need_compress(inode, start, end))
goto cleanup_and_bail_uncompressed;
- folios = kcalloc(nr_folios, sizeof(struct folio *), GFP_NOFS);
- if (!folios) {
- /*
- * Memory allocation failure is not a fatal error, we can fall
- * back to uncompressed code.
- */
- goto cleanup_and_bail_uncompressed;
- }
-
if (0 < inode->defrag_compress && inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) {
compress_type = inode->defrag_compress;
compress_level = inode->defrag_compress_level;
@@ -988,11 +996,15 @@ static void compress_file_range(struct btrfs_work *work)
}
/* Compression level is applied here. */
- ret = btrfs_compress_folios(compress_type, compress_level,
- inode, start, folios, &nr_folios, &total_in,
- &total_compressed);
- if (ret)
+ cb = btrfs_compress_bio(inode, start, cur_len, compress_type,
+ compress_level, async_chunk->write_flags);
+ if (IS_ERR(cb)) {
+ cb = NULL;
goto mark_incompressible;
+ }
+
+ total_compressed = cb->bbio.bio.bi_iter.bi_size;
+ total_in = cur_len;
/*
* Zero the tail end of the last folio, as we might be sending it down
@@ -1000,7 +1012,7 @@ static void compress_file_range(struct btrfs_work *work)
*/
loff = (total_compressed & (min_folio_size - 1));
if (loff)
- folio_zero_range(folios[nr_folios - 1], loff, min_folio_size - loff);
+ zero_last_folio(cb);
/*
* Try to create an inline extent.
@@ -1016,11 +1028,13 @@ static void compress_file_range(struct btrfs_work *work)
BTRFS_COMPRESS_NONE, NULL, false);
else
ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
- compress_type, folios[0], false);
+ compress_type,
+ bio_first_folio_all(&cb->bbio.bio), false);
if (ret <= 0) {
+ cleanup_compressed_bio(cb);
if (ret < 0)
mapping_set_error(mapping, -EIO);
- goto free_pages;
+ return;
}
/*
@@ -1028,6 +1042,7 @@ static void compress_file_range(struct btrfs_work *work)
* block size boundary so the allocator does sane things.
*/
total_compressed = ALIGN(total_compressed, blocksize);
+ round_up_last_block(cb, blocksize);
/*
* One last check to make sure the compression is really a win, compare
@@ -1038,12 +1053,12 @@ static void compress_file_range(struct btrfs_work *work)
if (total_compressed + blocksize > total_in)
goto mark_incompressible;
+
/*
* The async work queues will take care of doing actual allocation on
* disk for these compressed pages, and will submit the bios.
*/
- ret = add_async_extent(async_chunk, start, total_in, total_compressed, folios,
- nr_folios, compress_type);
+ ret = add_async_extent(async_chunk, start, total_in, cb);
BUG_ON(ret);
if (start + total_in < end) {
start += total_in;
@@ -1056,33 +1071,10 @@ static void compress_file_range(struct btrfs_work *work)
if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
- ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
- BTRFS_COMPRESS_NONE);
+ ret = add_async_extent(async_chunk, start, end - start + 1, NULL);
BUG_ON(ret);
-free_pages:
- if (folios) {
- for (i = 0; i < nr_folios; i++) {
- WARN_ON(folios[i]->mapping);
- btrfs_free_compr_folio(folios[i]);
- }
- kfree(folios);
- }
-}
-
-static void free_async_extent_pages(struct async_extent *async_extent)
-{
- int i;
-
- if (!async_extent->folios)
- return;
-
- for (i = 0; i < async_extent->nr_folios; i++) {
- WARN_ON(async_extent->folios[i]->mapping);
- btrfs_free_compr_folio(async_extent->folios[i]);
- }
- kfree(async_extent->folios);
- async_extent->nr_folios = 0;
- async_extent->folios = NULL;
+ if (cb)
+ cleanup_compressed_bio(cb);
}
static void submit_uncompressed_range(struct btrfs_inode *inode,
@@ -1129,7 +1121,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
struct extent_state *cached = NULL;
struct extent_map *em;
int ret = 0;
- bool free_pages = false;
+ u32 compressed_size;
u64 start = async_extent->start;
u64 end = async_extent->start + async_extent->ram_size - 1;
@@ -1149,17 +1141,14 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
locked_folio = async_chunk->locked_folio;
}
- if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
- ASSERT(!async_extent->folios);
- ASSERT(async_extent->nr_folios == 0);
+ if (!async_extent->cb) {
submit_uncompressed_range(inode, async_extent, locked_folio);
- free_pages = true;
goto done;
}
+ compressed_size = async_extent->cb->bbio.bio.bi_iter.bi_size;
ret = btrfs_reserve_extent(root, async_extent->ram_size,
- async_extent->compressed_size,
- async_extent->compressed_size,
+ compressed_size, compressed_size,
0, *alloc_hint, &ins, true, true);
if (ret) {
/*
@@ -1169,7 +1158,8 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
* fall back to uncompressed.
*/
submit_uncompressed_range(inode, async_extent, locked_folio);
- free_pages = true;
+ cleanup_compressed_bio(async_extent->cb);
+ async_extent->cb = NULL;
goto done;
}
@@ -1181,7 +1171,9 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
file_extent.ram_bytes = async_extent->ram_size;
file_extent.num_bytes = async_extent->ram_size;
file_extent.offset = 0;
- file_extent.compression = async_extent->compress_type;
+ file_extent.compression = async_extent->cb->compress_type;
+
+ async_extent->cb->bbio.bio.bi_iter.bi_sector = ins.objectid >> SECTOR_SHIFT;
em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
if (IS_ERR(em)) {
@@ -1197,22 +1189,20 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
ret = PTR_ERR(ordered);
goto out_free_reserve;
}
+ async_extent->cb->bbio.ordered = ordered;
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
/* Clear dirty, set writeback and unlock the pages. */
extent_clear_unlock_delalloc(inode, start, end,
NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
PAGE_UNLOCK | PAGE_START_WRITEBACK);
- btrfs_submit_compressed_write(ordered,
- async_extent->folios, /* compressed_folios */
- async_extent->nr_folios,
- async_chunk->write_flags, true);
+ btrfs_submit_bbio(&async_extent->cb->bbio, 0);
+ async_extent->cb = NULL;
+
*alloc_hint = ins.objectid + ins.offset;
done:
if (async_chunk->blkcg_css)
kthread_associate_blkcg(NULL);
- if (free_pages)
- free_async_extent_pages(async_extent);
kfree(async_extent);
return;
@@ -1227,7 +1217,8 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
PAGE_UNLOCK | PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
- free_async_extent_pages(async_extent);
+ if (async_extent->cb)
+ cleanup_compressed_bio(async_extent->cb);
if (async_chunk->blkcg_css)
kthread_associate_blkcg(NULL);
btrfs_debug(fs_info,
--
2.52.0
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH v3 6/9] btrfs: remove the old btrfs_compress_folios() infrastructures
2026-01-27 3:10 [PATCH v3 0/9] btrfs: used compressed_bio structure for read and write Qu Wenruo
` (4 preceding siblings ...)
2026-01-27 3:10 ` [PATCH v3 5/9] btrfs: switch to btrfs_compress_bio() interface for compressed writes Qu Wenruo
@ 2026-01-27 3:10 ` Qu Wenruo
2026-01-27 3:10 ` [PATCH v3 7/9] btrfs: get rid of compressed_folios[] usage for compressed read Qu Wenruo
` (2 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Qu Wenruo @ 2026-01-27 3:10 UTC (permalink / raw)
To: linux-btrfs
Since it's replaced by btrfs_compress_bio(), remove all involved
functions.
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
fs/btrfs/compression.c | 70 ---------------
fs/btrfs/compression.h | 12 ---
fs/btrfs/inode.c | 2 +-
fs/btrfs/lzo.c | 188 ----------------------------------------
fs/btrfs/zlib.c | 189 -----------------------------------------
fs/btrfs/zstd.c | 189 -----------------------------------------
6 files changed, 1 insertion(+), 649 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 1d4e7c7c25c3..1bf17c269524 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -86,37 +86,6 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len)
return false;
}
-static int compression_compress_pages(int type, struct list_head *ws,
- struct btrfs_inode *inode, u64 start,
- struct folio **folios, unsigned long *out_folios,
- unsigned long *total_in, unsigned long *total_out)
-{
- switch (type) {
- case BTRFS_COMPRESS_ZLIB:
- return zlib_compress_folios(ws, inode, start, folios,
- out_folios, total_in, total_out);
- case BTRFS_COMPRESS_LZO:
- return lzo_compress_folios(ws, inode, start, folios,
- out_folios, total_in, total_out);
- case BTRFS_COMPRESS_ZSTD:
- return zstd_compress_folios(ws, inode, start, folios,
- out_folios, total_in, total_out);
- case BTRFS_COMPRESS_NONE:
- default:
- /*
- * This can happen when compression races with remount setting
- * it to 'no compress', while caller doesn't call
- * inode_need_compress() to check if we really need to
- * compress.
- *
- * Not a big deal, just need to inform caller that we
- * haven't allocated any pages yet.
- */
- *out_folios = 0;
- return -E2BIG;
- }
-}
-
static int compression_decompress_bio(struct list_head *ws,
struct compressed_bio *cb)
{
@@ -1023,45 +992,6 @@ int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
return 0;
}
-/*
- * Given an address space and start and length, compress the bytes into @pages
- * that are allocated on demand.
- *
- * @type_level is encoded algorithm and level, where level 0 means whatever
- * default the algorithm chooses and is opaque here;
- * - compression algo are 0-3
- * - the level are bits 4-7
- *
- * @out_folios is an in/out parameter, holds maximum number of folios to allocate
- * and returns number of actually allocated folios
- *
- * @total_in is used to return the number of bytes actually read. It
- * may be smaller than the input length if we had to exit early because we
- * ran out of room in the folios array or because we cross the
- * max_out threshold.
- *
- * @total_out is an in/out parameter, must be set to the input length and will
- * be also used to return the total number of compressed bytes
- */
-int btrfs_compress_folios(unsigned int type, int level, struct btrfs_inode *inode,
- u64 start, struct folio **folios, unsigned long *out_folios,
- unsigned long *total_in, unsigned long *total_out)
-{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- const unsigned long orig_len = *total_out;
- struct list_head *workspace;
- int ret;
-
- level = btrfs_compress_set_level(type, level);
- workspace = get_workspace(fs_info, type, level);
- ret = compression_compress_pages(type, workspace, inode, start, folios,
- out_folios, total_in, total_out);
- /* The total read-in bytes should be no larger than the input. */
- ASSERT(*total_in <= orig_len);
- put_workspace(fs_info, type, workspace);
- return ret;
-}
-
/*
* Given an address space and start and length, compress the page cache
* contents into @cb.
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index fd0cce5d07cf..7dc48e556313 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -91,9 +91,6 @@ int __init btrfs_init_compress(void);
void __cold btrfs_exit_compress(void);
bool btrfs_compress_level_valid(unsigned int type, int level);
-int btrfs_compress_folios(unsigned int type, int level, struct btrfs_inode *inode,
- u64 start, struct folio **folios, unsigned long *out_folios,
- unsigned long *total_in, unsigned long *total_out);
int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
unsigned long dest_pgoff, size_t srclen, size_t destlen);
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
@@ -160,9 +157,6 @@ static inline void cleanup_compressed_bio(struct compressed_bio *cb)
bio_put(bio);
}
-int zlib_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
- u64 start, struct folio **folios, unsigned long *out_folios,
- unsigned long *total_in, unsigned long *total_out);
int zlib_compress_bio(struct list_head *ws, struct compressed_bio *cb);
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zlib_decompress(struct list_head *ws, const u8 *data_in,
@@ -172,9 +166,6 @@ struct list_head *zlib_alloc_workspace(struct btrfs_fs_info *fs_info, unsigned i
void zlib_free_workspace(struct list_head *ws);
struct list_head *zlib_get_workspace(struct btrfs_fs_info *fs_info, unsigned int level);
-int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
- u64 start, struct folio **folios, unsigned long *out_folios,
- unsigned long *total_in, unsigned long *total_out);
int lzo_compress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, const u8 *data_in,
@@ -183,9 +174,6 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
struct list_head *lzo_alloc_workspace(struct btrfs_fs_info *fs_info);
void lzo_free_workspace(struct list_head *ws);
-int zstd_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
- u64 start, struct folio **folios, unsigned long *out_folios,
- unsigned long *total_in, unsigned long *total_out);
int zstd_compress_bio(struct list_head *ws, struct compressed_bio *cb);
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zstd_decompress(struct list_head *ws, const u8 *data_in,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index aafffb72dd0e..d010621b64d5 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -942,7 +942,7 @@ static void compress_file_range(struct btrfs_work *work)
/*
* All the folios should have been locked thus no failure.
*
- * And even if some folios are missing, btrfs_compress_folios()
+ * And even if some folios are missing, btrfs_compress_bio()
* would handle them correctly, so here just do an ASSERT() check for
* early logic errors.
*/
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 7314ab500005..4a7abb0b9809 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -122,98 +122,6 @@ static inline size_t read_compress_length(const char *buf)
return le32_to_cpu(dlen);
}
-/*
- * Will do:
- *
- * - Write a segment header into the destination
- * - Copy the compressed buffer into the destination
- * - Make sure we have enough space in the last sector to fit a segment header
- * If not, we will pad at most (LZO_LEN (4)) - 1 bytes of zeros.
- *
- * Will allocate new pages when needed.
- */
-static int copy_compressed_data_to_page(struct btrfs_fs_info *fs_info,
- char *compressed_data,
- size_t compressed_size,
- struct folio **out_folios,
- unsigned long max_nr_folio,
- u32 *cur_out)
-{
- const u32 sectorsize = fs_info->sectorsize;
- const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
- u32 sector_bytes_left;
- u32 orig_out;
- struct folio *cur_folio;
- char *kaddr;
-
- if ((*cur_out >> min_folio_shift) >= max_nr_folio)
- return -E2BIG;
-
- /*
- * We never allow a segment header crossing sector boundary, previous
- * run should ensure we have enough space left inside the sector.
- */
- ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);
-
- cur_folio = out_folios[*cur_out >> min_folio_shift];
- /* Allocate a new page */
- if (!cur_folio) {
- cur_folio = btrfs_alloc_compr_folio(fs_info);
- if (!cur_folio)
- return -ENOMEM;
- out_folios[*cur_out >> min_folio_shift] = cur_folio;
- }
-
- kaddr = kmap_local_folio(cur_folio, offset_in_folio(cur_folio, *cur_out));
- write_compress_length(kaddr, compressed_size);
- *cur_out += LZO_LEN;
-
- orig_out = *cur_out;
-
- /* Copy compressed data */
- while (*cur_out - orig_out < compressed_size) {
- u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
- orig_out + compressed_size - *cur_out);
-
- kunmap_local(kaddr);
-
- if ((*cur_out >> min_folio_shift) >= max_nr_folio)
- return -E2BIG;
-
- cur_folio = out_folios[*cur_out >> min_folio_shift];
- /* Allocate a new page */
- if (!cur_folio) {
- cur_folio = btrfs_alloc_compr_folio(fs_info);
- if (!cur_folio)
- return -ENOMEM;
- out_folios[*cur_out >> min_folio_shift] = cur_folio;
- }
- kaddr = kmap_local_folio(cur_folio, 0);
-
- memcpy(kaddr + offset_in_folio(cur_folio, *cur_out),
- compressed_data + *cur_out - orig_out, copy_len);
-
- *cur_out += copy_len;
- }
-
- /*
- * Check if we can fit the next segment header into the remaining space
- * of the sector.
- */
- sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
- if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
- goto out;
-
- /* The remaining size is not enough, pad it with zeros */
- memset(kaddr + offset_in_page(*cur_out), 0,
- sector_bytes_left);
- *cur_out += sector_bytes_left;
-
-out:
- kunmap_local(kaddr);
- return 0;
-}
-
/*
* Write data into @out_folio and queue it into @out_bio.
*
@@ -367,102 +275,6 @@ static int copy_compressed_data_to_bio(struct btrfs_fs_info *fs_info,
sector_bytes_left);
}
-int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
- u64 start, struct folio **folios, unsigned long *out_folios,
- unsigned long *total_in, unsigned long *total_out)
-{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct workspace *workspace = list_entry(ws, struct workspace, list);
- const u32 sectorsize = fs_info->sectorsize;
- const u32 min_folio_size = btrfs_min_folio_size(fs_info);
- struct address_space *mapping = inode->vfs_inode.i_mapping;
- struct folio *folio_in = NULL;
- char *sizes_ptr;
- const unsigned long max_nr_folio = *out_folios;
- int ret = 0;
- /* Points to the file offset of input data */
- u64 cur_in = start;
- /* Points to the current output byte */
- u32 cur_out = 0;
- u32 len = *total_out;
-
- ASSERT(max_nr_folio > 0);
- *out_folios = 0;
- *total_out = 0;
- *total_in = 0;
-
- /*
- * Skip the header for now, we will later come back and write the total
- * compressed size
- */
- cur_out += LZO_LEN;
- while (cur_in < start + len) {
- char *data_in;
- const u32 sectorsize_mask = sectorsize - 1;
- u32 sector_off = (cur_in - start) & sectorsize_mask;
- u32 in_len;
- size_t out_len;
-
- /* Get the input page first */
- if (!folio_in) {
- ret = btrfs_compress_filemap_get_folio(mapping, cur_in, &folio_in);
- if (ret < 0)
- goto out;
- }
-
- /* Compress at most one sector of data each time */
- in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
- ASSERT(in_len);
- data_in = kmap_local_folio(folio_in, offset_in_folio(folio_in, cur_in));
- ret = lzo1x_1_compress(data_in, in_len,
- workspace->cbuf, &out_len,
- workspace->mem);
- kunmap_local(data_in);
- if (unlikely(ret < 0)) {
- /* lzo1x_1_compress never fails. */
- ret = -EIO;
- goto out;
- }
-
- ret = copy_compressed_data_to_page(fs_info, workspace->cbuf, out_len,
- folios, max_nr_folio,
- &cur_out);
- if (ret < 0)
- goto out;
-
- cur_in += in_len;
-
- /*
- * Check if we're making it bigger after two sectors. And if
- * it is so, give up.
- */
- if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
- ret = -E2BIG;
- goto out;
- }
-
- /* Check if we have reached folio boundary. */
- if (IS_ALIGNED(cur_in, min_folio_size)) {
- folio_put(folio_in);
- folio_in = NULL;
- }
- }
-
- /* Store the size of all chunks of compressed data */
- sizes_ptr = kmap_local_folio(folios[0], 0);
- write_compress_length(sizes_ptr, cur_out);
- kunmap_local(sizes_ptr);
-
- ret = 0;
- *total_out = cur_out;
- *total_in = cur_in - start;
-out:
- if (folio_in)
- folio_put(folio_in);
- *out_folios = DIV_ROUND_UP(cur_out, min_folio_size);
- return ret;
-}
-
int lzo_compress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct btrfs_inode *inode = cb->bbio.inode;
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 6f2a43f06b5c..d378c7f5b047 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -145,195 +145,6 @@ static int copy_data_into_buffer(struct address_space *mapping,
return 0;
}
-int zlib_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
- u64 start, struct folio **folios, unsigned long *out_folios,
- unsigned long *total_in, unsigned long *total_out)
-{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct workspace *workspace = list_entry(ws, struct workspace, list);
- struct address_space *mapping = inode->vfs_inode.i_mapping;
- const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
- const u32 min_folio_size = btrfs_min_folio_size(fs_info);
- int ret;
- char *data_in = NULL;
- char *cfolio_out;
- int nr_folios = 0;
- struct folio *in_folio = NULL;
- struct folio *out_folio = NULL;
- unsigned long len = *total_out;
- unsigned long nr_dest_folios = *out_folios;
- const unsigned long max_out = nr_dest_folios << min_folio_shift;
- const u32 blocksize = fs_info->sectorsize;
- const u64 orig_end = start + len;
-
- *out_folios = 0;
- *total_out = 0;
- *total_in = 0;
-
- ret = zlib_deflateInit(&workspace->strm, workspace->level);
- if (unlikely(ret != Z_OK)) {
- btrfs_err(fs_info,
- "zlib compression init failed, error %d root %llu inode %llu offset %llu",
- ret, btrfs_root_id(inode->root), btrfs_ino(inode), start);
- ret = -EIO;
- goto out;
- }
-
- workspace->strm.total_in = 0;
- workspace->strm.total_out = 0;
-
- out_folio = btrfs_alloc_compr_folio(fs_info);
- if (out_folio == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- cfolio_out = folio_address(out_folio);
- folios[0] = out_folio;
- nr_folios = 1;
-
- workspace->strm.next_in = workspace->buf;
- workspace->strm.avail_in = 0;
- workspace->strm.next_out = cfolio_out;
- workspace->strm.avail_out = min_folio_size;
-
- while (workspace->strm.total_in < len) {
- /*
- * Get next input pages and copy the contents to
- * the workspace buffer if required.
- */
- if (workspace->strm.avail_in == 0) {
- unsigned long bytes_left = len - workspace->strm.total_in;
- unsigned int copy_length = min(bytes_left, workspace->buf_size);
-
- /*
- * For s390 hardware accelerated zlib, and our folio is smaller
- * than the copy_length, we need to fill the buffer so that
- * we can take full advantage of hardware acceleration.
- */
- if (need_special_buffer(fs_info)) {
- ret = copy_data_into_buffer(mapping, workspace,
- start, copy_length);
- if (ret < 0)
- goto out;
- start += copy_length;
- workspace->strm.next_in = workspace->buf;
- workspace->strm.avail_in = copy_length;
- } else {
- unsigned int cur_len;
-
- if (data_in) {
- kunmap_local(data_in);
- folio_put(in_folio);
- data_in = NULL;
- }
- ret = btrfs_compress_filemap_get_folio(mapping,
- start, &in_folio);
- if (ret < 0)
- goto out;
- cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
- data_in = kmap_local_folio(in_folio,
- offset_in_folio(in_folio, start));
- start += cur_len;
- workspace->strm.next_in = data_in;
- workspace->strm.avail_in = cur_len;
- }
- }
-
- ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
- if (unlikely(ret != Z_OK)) {
- btrfs_warn(fs_info,
- "zlib compression failed, error %d root %llu inode %llu offset %llu",
- ret, btrfs_root_id(inode->root), btrfs_ino(inode),
- start);
- zlib_deflateEnd(&workspace->strm);
- ret = -EIO;
- goto out;
- }
-
- /* we're making it bigger, give up */
- if (workspace->strm.total_in > blocksize * 2 &&
- workspace->strm.total_in <
- workspace->strm.total_out) {
- ret = -E2BIG;
- goto out;
- }
- /* we need another page for writing out. Test this
- * before the total_in so we will pull in a new page for
- * the stream end if required
- */
- if (workspace->strm.avail_out == 0) {
- if (nr_folios == nr_dest_folios) {
- ret = -E2BIG;
- goto out;
- }
- out_folio = btrfs_alloc_compr_folio(fs_info);
- if (out_folio == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- cfolio_out = folio_address(out_folio);
- folios[nr_folios] = out_folio;
- nr_folios++;
- workspace->strm.avail_out = min_folio_size;
- workspace->strm.next_out = cfolio_out;
- }
- /* we're all done */
- if (workspace->strm.total_in >= len)
- break;
- if (workspace->strm.total_out > max_out)
- break;
- }
- workspace->strm.avail_in = 0;
- /*
- * Call deflate with Z_FINISH flush parameter providing more output
- * space but no more input data, until it returns with Z_STREAM_END.
- */
- while (ret != Z_STREAM_END) {
- ret = zlib_deflate(&workspace->strm, Z_FINISH);
- if (ret == Z_STREAM_END)
- break;
- if (unlikely(ret != Z_OK && ret != Z_BUF_ERROR)) {
- zlib_deflateEnd(&workspace->strm);
- ret = -EIO;
- goto out;
- } else if (workspace->strm.avail_out == 0) {
- /* Get another folio for the stream end. */
- if (nr_folios == nr_dest_folios) {
- ret = -E2BIG;
- goto out;
- }
- out_folio = btrfs_alloc_compr_folio(fs_info);
- if (out_folio == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- cfolio_out = folio_address(out_folio);
- folios[nr_folios] = out_folio;
- nr_folios++;
- workspace->strm.avail_out = min_folio_size;
- workspace->strm.next_out = cfolio_out;
- }
- }
- zlib_deflateEnd(&workspace->strm);
-
- if (workspace->strm.total_out >= workspace->strm.total_in) {
- ret = -E2BIG;
- goto out;
- }
-
- ret = 0;
- *total_out = workspace->strm.total_out;
- *total_in = workspace->strm.total_in;
-out:
- *out_folios = nr_folios;
- if (data_in) {
- kunmap_local(data_in);
- folio_put(in_folio);
- }
-
- return ret;
-}
-
int zlib_compress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct btrfs_inode *inode = cb->bbio.inode;
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index ce204a9300b5..caaf8d8213de 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -396,195 +396,6 @@ struct list_head *zstd_alloc_workspace(struct btrfs_fs_info *fs_info, int level)
return ERR_PTR(-ENOMEM);
}
-int zstd_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
- u64 start, struct folio **folios, unsigned long *out_folios,
- unsigned long *total_in, unsigned long *total_out)
-{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct workspace *workspace = list_entry(ws, struct workspace, list);
- struct address_space *mapping = inode->vfs_inode.i_mapping;
- zstd_cstream *stream;
- int ret = 0;
- int nr_folios = 0;
- struct folio *in_folio = NULL; /* The current folio to read. */
- struct folio *out_folio = NULL; /* The current folio to write to. */
- unsigned long tot_in = 0;
- unsigned long tot_out = 0;
- unsigned long len = *total_out;
- const unsigned long nr_dest_folios = *out_folios;
- const u64 orig_end = start + len;
- const u32 blocksize = fs_info->sectorsize;
- const u32 min_folio_size = btrfs_min_folio_size(fs_info);
- unsigned long max_out = nr_dest_folios * min_folio_size;
- unsigned int cur_len;
-
- workspace->params = zstd_get_btrfs_parameters(workspace->req_level, len);
- *out_folios = 0;
- *total_out = 0;
- *total_in = 0;
-
- /* Initialize the stream */
- stream = zstd_init_cstream(&workspace->params, len, workspace->mem,
- workspace->size);
- if (unlikely(!stream)) {
- btrfs_err(fs_info,
- "zstd compression init level %d failed, root %llu inode %llu offset %llu",
- workspace->req_level, btrfs_root_id(inode->root),
- btrfs_ino(inode), start);
- ret = -EIO;
- goto out;
- }
-
- /* map in the first page of input data */
- ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
- if (ret < 0)
- goto out;
- cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
- workspace->in_buf.src = kmap_local_folio(in_folio, offset_in_folio(in_folio, start));
- workspace->in_buf.pos = 0;
- workspace->in_buf.size = cur_len;
-
- /* Allocate and map in the output buffer */
- out_folio = btrfs_alloc_compr_folio(fs_info);
- if (out_folio == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- folios[nr_folios++] = out_folio;
- workspace->out_buf.dst = folio_address(out_folio);
- workspace->out_buf.pos = 0;
- workspace->out_buf.size = min_t(size_t, max_out, min_folio_size);
-
- while (1) {
- size_t ret2;
-
- ret2 = zstd_compress_stream(stream, &workspace->out_buf,
- &workspace->in_buf);
- if (unlikely(zstd_is_error(ret2))) {
- btrfs_warn(fs_info,
-"zstd compression level %d failed, error %d root %llu inode %llu offset %llu",
- workspace->req_level, zstd_get_error_code(ret2),
- btrfs_root_id(inode->root), btrfs_ino(inode),
- start);
- ret = -EIO;
- goto out;
- }
-
- /* Check to see if we are making it bigger */
- if (tot_in + workspace->in_buf.pos > blocksize * 2 &&
- tot_in + workspace->in_buf.pos <
- tot_out + workspace->out_buf.pos) {
- ret = -E2BIG;
- goto out;
- }
-
- /* We've reached the end of our output range */
- if (workspace->out_buf.pos >= max_out) {
- tot_out += workspace->out_buf.pos;
- ret = -E2BIG;
- goto out;
- }
-
- /* Check if we need more output space */
- if (workspace->out_buf.pos == workspace->out_buf.size) {
- tot_out += min_folio_size;
- max_out -= min_folio_size;
- if (nr_folios == nr_dest_folios) {
- ret = -E2BIG;
- goto out;
- }
- out_folio = btrfs_alloc_compr_folio(fs_info);
- if (out_folio == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- folios[nr_folios++] = out_folio;
- workspace->out_buf.dst = folio_address(out_folio);
- workspace->out_buf.pos = 0;
- workspace->out_buf.size = min_t(size_t, max_out, min_folio_size);
- }
-
- /* We've reached the end of the input */
- if (workspace->in_buf.pos >= len) {
- tot_in += workspace->in_buf.pos;
- break;
- }
-
- /* Check if we need more input */
- if (workspace->in_buf.pos == workspace->in_buf.size) {
- tot_in += workspace->in_buf.size;
- kunmap_local(workspace->in_buf.src);
- workspace->in_buf.src = NULL;
- folio_put(in_folio);
- start += cur_len;
- len -= cur_len;
- ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
- if (ret < 0)
- goto out;
- cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
- workspace->in_buf.src = kmap_local_folio(in_folio,
- offset_in_folio(in_folio, start));
- workspace->in_buf.pos = 0;
- workspace->in_buf.size = cur_len;
- }
- }
- while (1) {
- size_t ret2;
-
- ret2 = zstd_end_stream(stream, &workspace->out_buf);
- if (unlikely(zstd_is_error(ret2))) {
- btrfs_err(fs_info,
-"zstd compression end level %d failed, error %d root %llu inode %llu offset %llu",
- workspace->req_level, zstd_get_error_code(ret2),
- btrfs_root_id(inode->root), btrfs_ino(inode),
- start);
- ret = -EIO;
- goto out;
- }
- if (ret2 == 0) {
- tot_out += workspace->out_buf.pos;
- break;
- }
- if (workspace->out_buf.pos >= max_out) {
- tot_out += workspace->out_buf.pos;
- ret = -E2BIG;
- goto out;
- }
-
- tot_out += min_folio_size;
- max_out -= min_folio_size;
- if (nr_folios == nr_dest_folios) {
- ret = -E2BIG;
- goto out;
- }
- out_folio = btrfs_alloc_compr_folio(fs_info);
- if (out_folio == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- folios[nr_folios++] = out_folio;
- workspace->out_buf.dst = folio_address(out_folio);
- workspace->out_buf.pos = 0;
- workspace->out_buf.size = min_t(size_t, max_out, min_folio_size);
- }
-
- if (tot_out >= tot_in) {
- ret = -E2BIG;
- goto out;
- }
-
- ret = 0;
- *total_in = tot_in;
- *total_out = tot_out;
-out:
- *out_folios = nr_folios;
- if (workspace->in_buf.src) {
- kunmap_local(workspace->in_buf.src);
- folio_put(in_folio);
- }
- return ret;
-}
-
int zstd_compress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct btrfs_inode *inode = cb->bbio.inode;
--
2.52.0
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH v3 7/9] btrfs: get rid of compressed_folios[] usage for compressed read
2026-01-27 3:10 [PATCH v3 0/9] btrfs: used compressed_bio structure for read and write Qu Wenruo
` (5 preceding siblings ...)
2026-01-27 3:10 ` [PATCH v3 6/9] btrfs: remove the old btrfs_compress_folios() infrastructures Qu Wenruo
@ 2026-01-27 3:10 ` Qu Wenruo
2026-01-27 3:10 ` [PATCH v3 8/9] btrfs: get rid of compressed_folios[] usage for encoded writes Qu Wenruo
2026-01-27 3:10 ` [PATCH v3 9/9] btrfs: get rid of compressed_bio::compressed_folios[] Qu Wenruo
8 siblings, 0 replies; 16+ messages in thread
From: Qu Wenruo @ 2026-01-27 3:10 UTC (permalink / raw)
To: linux-btrfs
Currently btrfs_submit_compressed_read() still uses
compressed_bio::compressed_folios[] array.
Change it to allocate each folio and queue them into the compressed bio
so that we do not need to allocate that array.
Considering how small each compressed read bio is (less than 128KiB), we
do not benefit that much from btrfs_alloc_folio_array() anyway,
meanwhile we may benefit more from btrfs_alloc_compr_folio() by using
the global folio pool.
So changing from btrfs_alloc_folio_array() to btrfs_alloc_compr_folio()
in a loop should still be fine.
This removes one error path, and paves the way to completely remove
compressed_folios[] array.
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
fs/btrfs/compression.c | 42 ++++++++++++++++++++++--------------------
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 1bf17c269524..c018b3c4554e 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -239,7 +239,7 @@ static void end_bbio_compressed_read(struct btrfs_bio *bbio)
btrfs_bio_end_io(cb->orig_bbio, status);
bio_for_each_folio_all(fi, &bbio->bio)
- folio_put(fi.folio);
+ btrfs_free_compr_folio(fi.folio);
bio_put(&bbio->bio);
}
@@ -537,13 +537,13 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
struct extent_map_tree *em_tree = &inode->extent_tree;
struct compressed_bio *cb;
unsigned int compressed_len;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
u64 file_offset = bbio->file_offset;
u64 em_len;
u64 em_start;
struct extent_map *em;
unsigned long pflags;
int memstall = 0;
- blk_status_t status;
int ret;
/* we need the actual starting offset of this extent in the file */
@@ -551,7 +551,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
em = btrfs_lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
read_unlock(&em_tree->lock);
if (!em) {
- status = BLK_STS_IOERR;
+ ret = -EIO;
goto out;
}
@@ -573,27 +573,31 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
btrfs_free_extent_map(em);
- cb->nr_folios = DIV_ROUND_UP(compressed_len, btrfs_min_folio_size(fs_info));
- cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct folio *), GFP_NOFS);
- if (!cb->compressed_folios) {
- status = BLK_STS_RESOURCE;
- goto out_free_bio;
- }
+ for (int i = 0; i * min_folio_size < compressed_len; i++) {
+ struct folio *folio;
+ u32 cur_len = min(compressed_len - i * min_folio_size,
+ min_folio_size);
- ret = btrfs_alloc_folio_array(cb->nr_folios, fs_info->block_min_order,
- cb->compressed_folios);
- if (ret) {
- status = BLK_STS_RESOURCE;
- goto out_free_compressed_pages;
+ folio = btrfs_alloc_compr_folio(fs_info);
+ if (!folio) {
+ ret = -ENOMEM;
+ goto out_free_bio;
+ }
+
+ ret = bio_add_folio(&cb->bbio.bio, folio, cur_len, 0);
+ if (unlikely(!ret)) {
+ folio_put(folio);
+ ret = -EINVAL;
+ goto out_free_bio;
+ }
}
+ ASSERT(cb->bbio.bio.bi_iter.bi_size == compressed_len);
add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
&pflags);
- /* include any pages we added in add_ra-bio_pages */
cb->len = bbio->bio.bi_iter.bi_size;
cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
- btrfs_add_compressed_bio_folios(cb);
if (memstall)
psi_memstall_leave(&pflags);
@@ -601,12 +605,10 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
btrfs_submit_bbio(&cb->bbio, 0);
return;
-out_free_compressed_pages:
- kfree(cb->compressed_folios);
out_free_bio:
- bio_put(&cb->bbio.bio);
+ cleanup_compressed_bio(cb);
out:
- btrfs_bio_end_io(bbio, status);
+ btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
}
/*
--
2.52.0
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH v3 8/9] btrfs: get rid of compressed_folios[] usage for encoded writes
2026-01-27 3:10 [PATCH v3 0/9] btrfs: used compressed_bio structure for read and write Qu Wenruo
` (6 preceding siblings ...)
2026-01-27 3:10 ` [PATCH v3 7/9] btrfs: get rid of compressed_folios[] usage for compressed read Qu Wenruo
@ 2026-01-27 3:10 ` Qu Wenruo
2026-01-27 20:28 ` Boris Burkov
2026-01-27 3:10 ` [PATCH v3 9/9] btrfs: get rid of compressed_bio::compressed_folios[] Qu Wenruo
8 siblings, 1 reply; 16+ messages in thread
From: Qu Wenruo @ 2026-01-27 3:10 UTC (permalink / raw)
To: linux-btrfs
Currently only encoded writes utilized btrfs_submit_compressed_write(),
which utilized compressed_bio::compressed_folios[] array.
Change the only call site to call the new helper,
btrfs_alloc_compressed_write(), to allocate a compressed bio, then queue
needed folios into that bio, and finally call
btrfs_submit_compressed_write() to submit the compressed bio.
This change has one hidden benefit, previously we use
btrfs_alloc_folio_array() for the folios of
btrfs_submit_compressed_read(), which doesn't utilize the compression
page pool for bs == ps cases.
Now we call btrfs_alloc_compr_folio() which will benefit from page pool.
The other obvious benefit is that we no longer need to allocate an array
to hold all those folios, thus one less error path.
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
fs/btrfs/compression.c | 53 +++++++++++++++++----------------------
fs/btrfs/compression.h | 6 ++---
fs/btrfs/inode.c | 56 +++++++++++++++++++++++-------------------
3 files changed, 56 insertions(+), 59 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index c018b3c4554e..205f6828c1e6 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -304,25 +304,6 @@ static void end_bbio_compressed_write(struct btrfs_bio *bbio)
bio_put(&cb->bbio.bio);
}
-static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
-{
- struct bio *bio = &cb->bbio.bio;
- u32 offset = 0;
- unsigned int findex = 0;
-
- while (offset < cb->compressed_len) {
- struct folio *folio = cb->compressed_folios[findex];
- u32 len = min_t(u32, cb->compressed_len - offset, folio_size(folio));
- int ret;
-
- /* Maximum compressed extent is smaller than bio size limit. */
- ret = bio_add_folio(bio, folio, len, 0);
- ASSERT(ret);
- offset += len;
- findex++;
- }
-}
-
/*
* worker function to build and submit bios for previously compressed pages.
* The corresponding pages in the inode should be marked for writeback
@@ -333,34 +314,44 @@ static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
* the end io hooks.
*/
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
- struct folio **compressed_folios,
- unsigned int nr_folios,
- blk_opf_t write_flags,
- bool writeback)
+ struct compressed_bio *cb)
{
struct btrfs_inode *inode = ordered->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct compressed_bio *cb;
ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));
+ ASSERT(cb->writeback);
- cb = alloc_compressed_bio(inode, ordered->file_offset,
- REQ_OP_WRITE | write_flags,
- end_bbio_compressed_write);
cb->start = ordered->file_offset;
cb->len = ordered->num_bytes;
- cb->compressed_folios = compressed_folios;
cb->compressed_len = ordered->disk_num_bytes;
- cb->writeback = writeback;
- cb->nr_folios = nr_folios;
cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
cb->bbio.ordered = ordered;
- btrfs_add_compressed_bio_folios(cb);
btrfs_submit_bbio(&cb->bbio, 0);
}
+/*
+ * Allocate a compressed write bio for @inode file offset @start length @len.
+ *
+ * The caller still needs to properly queue all folios and populate involved
+ * members.
+ */
+struct compressed_bio *btrfs_alloc_compressed_write(struct btrfs_inode *inode,
+ u64 start, u64 len)
+{
+ struct compressed_bio *cb;
+
+ cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE,
+ end_bbio_compressed_write);
+ cb->start = start;
+ cb->len = len;
+ cb->writeback = true;
+
+ return cb;
+}
+
/*
* Add extra pages in the same compressed file extent so that we don't need to
* re-read the same extent again and again.
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 7dc48e556313..2d3a28b26997 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -96,10 +96,10 @@ int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
struct compressed_bio *cb, u32 decompressed);
+struct compressed_bio *btrfs_alloc_compressed_write(struct btrfs_inode *inode,
+ u64 start, u64 len);
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
- struct folio **compressed_folios,
- unsigned int nr_folios, blk_opf_t write_flags,
- bool writeback);
+ struct compressed_bio *cb);
void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d010621b64d5..f1df43f2e69a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9804,12 +9804,13 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
struct extent_state *cached_state = NULL;
struct btrfs_ordered_extent *ordered;
struct btrfs_file_extent file_extent;
+ struct compressed_bio *cb = NULL;
int compression;
size_t orig_count;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
u64 start, end;
u64 num_bytes, ram_bytes, disk_num_bytes;
- unsigned long nr_folios, i;
- struct folio **folios;
+ unsigned long nr_folios;
struct btrfs_key ins;
bool extent_reserved = false;
struct extent_map *em;
@@ -9899,38 +9900,45 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
*/
disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
- folios = kvcalloc(nr_folios, sizeof(struct folio *), GFP_KERNEL_ACCOUNT);
- if (!folios)
- return -ENOMEM;
- for (i = 0; i < nr_folios; i++) {
- size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
+
+ cb = btrfs_alloc_compressed_write(inode, start, num_bytes);
+ for (int i = 0; i < nr_folios; i++) {
+ struct folio *folio;
+ size_t bytes = min(min_folio_size, iov_iter_count(from));
char *kaddr;
- folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0);
- if (!folios[i]) {
+ folio = btrfs_alloc_compr_folio(fs_info);
+ if (!folio) {
ret = -ENOMEM;
- goto out_folios;
+ goto out_cb;
}
- kaddr = kmap_local_folio(folios[i], 0);
+ kaddr = kmap_local_folio(folio, 0);
if (copy_from_iter(kaddr, bytes, from) != bytes) {
kunmap_local(kaddr);
+ folio_put(folio);
ret = -EFAULT;
- goto out_folios;
+ goto out_cb;
+ }
+ if (bytes < min_folio_size)
+ folio_zero_range(folio, bytes, min_folio_size - bytes);
+ ret = bio_add_folio(&cb->bbio.bio, folio, folio_size(folio), 0);
+ if (!unlikely(ret)) {
+ folio_put(folio);
+ ret = -EINVAL;
+ goto out_cb;
}
- if (bytes < PAGE_SIZE)
- memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
- kunmap_local(kaddr);
}
+ ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);
for (;;) {
ret = btrfs_wait_ordered_range(inode, start, num_bytes);
if (ret)
- goto out_folios;
+ goto out_cb;
ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
start >> PAGE_SHIFT,
end >> PAGE_SHIFT);
if (ret)
- goto out_folios;
+ goto out_cb;
btrfs_lock_extent(io_tree, start, end, &cached_state);
ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
if (!ordered &&
@@ -9962,7 +9970,8 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
encoded->unencoded_offset == 0 &&
can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
ret = __cow_file_range_inline(inode, encoded->len,
- orig_count, compression, folios[0],
+ orig_count, compression,
+ bio_first_folio_all(&cb->bbio.bio),
true);
if (ret <= 0) {
if (ret == 0)
@@ -10007,7 +10016,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
btrfs_delalloc_release_extents(inode, num_bytes);
- btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false);
+ btrfs_submit_compressed_write(ordered, cb);
ret = orig_count;
goto out;
@@ -10029,12 +10038,9 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
out_unlock:
btrfs_unlock_extent(io_tree, start, end, &cached_state);
-out_folios:
- for (i = 0; i < nr_folios; i++) {
- if (folios[i])
- folio_put(folios[i]);
- }
- kvfree(folios);
+out_cb:
+ if (cb)
+ cleanup_compressed_bio(cb);
out:
if (ret >= 0)
iocb->ki_pos += encoded->len;
--
2.52.0
^ permalink raw reply related [flat|nested] 16+ messages in thread* Re: [PATCH v3 8/9] btrfs: get rid of compressed_folios[] usage for encoded writes
2026-01-27 3:10 ` [PATCH v3 8/9] btrfs: get rid of compressed_folios[] usage for encoded writes Qu Wenruo
@ 2026-01-27 20:28 ` Boris Burkov
2026-01-27 21:10 ` Qu Wenruo
2026-01-29 1:47 ` David Sterba
0 siblings, 2 replies; 16+ messages in thread
From: Boris Burkov @ 2026-01-27 20:28 UTC (permalink / raw)
To: Qu Wenruo; +Cc: linux-btrfs
On Tue, Jan 27, 2026 at 01:40:41PM +1030, Qu Wenruo wrote:
> Currently only encoded writes utilized btrfs_submit_compressed_write(),
> which utilized compressed_bio::compressed_folios[] array.
>
> Change the only call site to call the new helper,
> btrfs_alloc_compressed_write(), to allocate a compressed bio, then queue
> needed folios into that bio, and finally call
> btrfs_submit_compressed_write() to submit the compreseed bio.
>
> This change has one hidden benefit, previously we use
> btrfs_alloc_folio_array() for the folios of
> btrfs_submit_compressed_read(), which doesn't utilize the compression
> page pool for bs == ps cases.
>
> Now we call btrfs_alloc_compr_folio() which will benefit from page pool.
>
> The other obvious benefit is that we no longer need to allocate an array
> to hold all those folios, thus one less error path.
This review is from claude using Chris's review prompts with some light
editing / checking by me.
>
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> ---
> fs/btrfs/compression.c | 53 +++++++++++++++++----------------------
> fs/btrfs/compression.h | 6 ++---
> fs/btrfs/inode.c | 56 +++++++++++++++++++++++-------------------
> 3 files changed, 56 insertions(+), 59 deletions(-)
>
> diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
> index c018b3c4554e..205f6828c1e6 100644
> --- a/fs/btrfs/compression.c
> +++ b/fs/btrfs/compression.c
> @@ -304,25 +304,6 @@ static void end_bbio_compressed_write(struct btrfs_bio *bbio)
> bio_put(&cb->bbio.bio);
> }
>
> -static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
> -{
> - struct bio *bio = &cb->bbio.bio;
> - u32 offset = 0;
> - unsigned int findex = 0;
> -
> - while (offset < cb->compressed_len) {
> - struct folio *folio = cb->compressed_folios[findex];
> - u32 len = min_t(u32, cb->compressed_len - offset, folio_size(folio));
> - int ret;
> -
> - /* Maximum compressed extent is smaller than bio size limit. */
> - ret = bio_add_folio(bio, folio, len, 0);
> - ASSERT(ret);
> - offset += len;
> - findex++;
> - }
> -}
> -
> /*
> * worker function to build and submit bios for previously compressed pages.
> * The corresponding pages in the inode should be marked for writeback
> @@ -333,34 +314,44 @@ static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
> * the end io hooks.
> */
> void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
> - struct folio **compressed_folios,
> - unsigned int nr_folios,
> - blk_opf_t write_flags,
> - bool writeback)
> + struct compressed_bio *cb)
> {
> struct btrfs_inode *inode = ordered->inode;
> struct btrfs_fs_info *fs_info = inode->root->fs_info;
> - struct compressed_bio *cb;
>
> ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
> ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));
> + ASSERT(cb->writeback);
>
> - cb = alloc_compressed_bio(inode, ordered->file_offset,
> - REQ_OP_WRITE | write_flags,
> - end_bbio_compressed_write);
> cb->start = ordered->file_offset;
> cb->len = ordered->num_bytes;
> - cb->compressed_folios = compressed_folios;
> cb->compressed_len = ordered->disk_num_bytes;
> - cb->writeback = writeback;
> - cb->nr_folios = nr_folios;
> cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
> cb->bbio.ordered = ordered;
> - btrfs_add_compressed_bio_folios(cb);
>
> btrfs_submit_bbio(&cb->bbio, 0);
> }
>
> +/*
> + * Allocate a compressed write bio for @inode file offset @start length @len.
> + *
> + * The caller still needs to properly queue all folios and populate involved
> + * members.
> + */
> +struct compressed_bio *btrfs_alloc_compressed_write(struct btrfs_inode *inode,
> + u64 start, u64 len)
> +{
> + struct compressed_bio *cb;
> +
> + cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE,
> + end_bbio_compressed_write);
> + cb->start = start;
> + cb->len = len;
> + cb->writeback = true;
> +
> + return cb;
> +}
> +
> /*
> * Add extra pages in the same compressed file extent so that we don't need to
> * re-read the same extent again and again.
> diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
> index 7dc48e556313..2d3a28b26997 100644
> --- a/fs/btrfs/compression.h
> +++ b/fs/btrfs/compression.h
> @@ -96,10 +96,10 @@ int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
> int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
> struct compressed_bio *cb, u32 decompressed);
>
> +struct compressed_bio *btrfs_alloc_compressed_write(struct btrfs_inode *inode,
> + u64 start, u64 len);
> void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
> - struct folio **compressed_folios,
> - unsigned int nr_folios, blk_opf_t write_flags,
> - bool writeback);
> + struct compressed_bio *cb);
> void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
>
> int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret);
> diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
> index d010621b64d5..f1df43f2e69a 100644
> --- a/fs/btrfs/inode.c
> +++ b/fs/btrfs/inode.c
> @@ -9804,12 +9804,13 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
> struct extent_state *cached_state = NULL;
> struct btrfs_ordered_extent *ordered;
> struct btrfs_file_extent file_extent;
> + struct compressed_bio *cb = NULL;
> int compression;
> size_t orig_count;
> + const u32 min_folio_size = btrfs_min_folio_size(fs_info);
> u64 start, end;
> u64 num_bytes, ram_bytes, disk_num_bytes;
> - unsigned long nr_folios, i;
> - struct folio **folios;
> + unsigned long nr_folios;
> struct btrfs_key ins;
> bool extent_reserved = false;
> struct extent_map *em;
> @@ -9899,38 +9900,45 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
> */
> disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
> nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
> - folios = kvcalloc(nr_folios, sizeof(struct folio *), GFP_KERNEL_ACCOUNT);
> - if (!folios)
> - return -ENOMEM;
> - for (i = 0; i < nr_folios; i++) {
> - size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
> +
> + cb = btrfs_alloc_compressed_write(inode, start, num_bytes);
> + for (int i = 0; i < nr_folios; i++) {
> + struct folio *folio;
> + size_t bytes = min(min_folio_size, iov_iter_count(from));
> char *kaddr;
>
> - folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0);
> - if (!folios[i]) {
> + folio = btrfs_alloc_compr_folio(fs_info);
> + if (!folio) {
> ret = -ENOMEM;
> - goto out_folios;
> + goto out_cb;
> }
> - kaddr = kmap_local_folio(folios[i], 0);
> + kaddr = kmap_local_folio(folio, 0);
> if (copy_from_iter(kaddr, bytes, from) != bytes) {
> kunmap_local(kaddr);
> + folio_put(folio);
> ret = -EFAULT;
> - goto out_folios;
> + goto out_cb;
> + }
> + if (bytes < min_folio_size)
> + folio_zero_range(folio, bytes, min_folio_size - bytes);
> + ret = bio_add_folio(&cb->bbio.bio, folio, folio_size(folio), 0);
> + if (!unlikely(ret)) {
Should this be unlikely(!ret) instead of !unlikely(ret)?
While !unlikely(ret) evaluates to the same boolean result as !ret, the
branch prediction hint is inverted.
> + folio_put(folio);
> + ret = -EINVAL;
> + goto out_cb;
> }
> - if (bytes < PAGE_SIZE)
> - memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
> - kunmap_local(kaddr);
Is there a missing kunmap_local(kaddr) here? The original code called
kunmap_local() after the memset:
if (bytes < PAGE_SIZE)
memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
kunmap_local(kaddr);
But the new code appears to have lost the corresponding kunmap_local().
> }
> + ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);
>
> for (;;) {
> ret = btrfs_wait_ordered_range(inode, start, num_bytes);
> if (ret)
> - goto out_folios;
> + goto out_cb;
> ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
> start >> PAGE_SHIFT,
> end >> PAGE_SHIFT);
> if (ret)
> - goto out_folios;
> + goto out_cb;
> btrfs_lock_extent(io_tree, start, end, &cached_state);
> ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
> if (!ordered &&
> @@ -9962,7 +9970,8 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
> encoded->unencoded_offset == 0 &&
> can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
> ret = __cow_file_range_inline(inode, encoded->len,
> - orig_count, compression, folios[0],
> + orig_count, compression,
> + bio_first_folio_all(&cb->bbio.bio),
> true);
> if (ret <= 0) {
> if (ret == 0)
> @@ -10007,7 +10016,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
>
> btrfs_delalloc_release_extents(inode, num_bytes);
>
> - btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false);
> + btrfs_submit_compressed_write(ordered, cb);
> ret = orig_count;
> goto out;
>
> @@ -10029,12 +10038,9 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
> btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
> out_unlock:
> btrfs_unlock_extent(io_tree, start, end, &cached_state);
> -out_folios:
> - for (i = 0; i < nr_folios; i++) {
> - if (folios[i])
> - folio_put(folios[i]);
> - }
> - kvfree(folios);
> +out_cb:
> + if (cb)
> + cleanup_compressed_bio(cb);
> out:
> if (ret >= 0)
> iocb->ki_pos += encoded->len;
> --
> 2.52.0
>
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH v3 8/9] btrfs: get rid of compressed_folios[] usage for encoded writes
2026-01-27 20:28 ` Boris Burkov
@ 2026-01-27 21:10 ` Qu Wenruo
2026-01-27 21:27 ` Boris Burkov
2026-01-29 1:47 ` David Sterba
1 sibling, 1 reply; 16+ messages in thread
From: Qu Wenruo @ 2026-01-27 21:10 UTC (permalink / raw)
To: Boris Burkov; +Cc: linux-btrfs
在 2026/1/28 06:58, Boris Burkov 写道:
> On Tue, Jan 27, 2026 at 01:40:41PM +1030, Qu Wenruo wrote:
>> Currently only encoded writes utilized btrfs_submit_compressed_write(),
>> which utilized compressed_bio::compressed_folios[] array.
>>
>> Change the only call site to call the new helper,
>> btrfs_alloc_compressed_write(), to allocate a compressed bio, then queue
>> needed folios into that bio, and finally call
>> btrfs_submit_compressed_write() to submit the compreseed bio.
>>
>> This change has one hidden benefit, previously we use
>> btrfs_alloc_folio_array() for the folios of
>> btrfs_submit_compressed_read(), which doesn't utilize the compression
>> page pool for bs == ps cases.
>>
>> Now we call btrfs_alloc_compr_folio() which will benefit from page pool.
>>
>> The other obvious benefit is that we no longer need to allocate an array
>> to hold all those folios, thus one less error path.
>
> This review is from claude using Chris's review prompts with some light
> editing / checking by me.
Wow, the AI review is better than I thought.
Indeed caught two real and careless errors.
[...]
>> + if (bytes < min_folio_size)
>> + folio_zero_range(folio, bytes, min_folio_size - bytes);
>> + ret = bio_add_folio(&cb->bbio.bio, folio, folio_size(folio), 0);
>> + if (!unlikely(ret)) {
>
> Should this be unlikely(!ret) instead of !unlikely(ret)?
My bad, it should follow all the other sites to use if (unlikely(!ret)),
but the heatwave makes my fingers slip.
>
> While !unlikely(ret) evaluates to the same boolean result as !ret, the
> branch prediction hint is inverted.
>
>> + folio_put(folio);
>> + ret = -EINVAL;
>> + goto out_cb;
>> }
>> - if (bytes < PAGE_SIZE)
>> - memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
>> - kunmap_local(kaddr);
>
> Is there a missing kunmap_local(kaddr) here? The original code called
> kunmap_local() after the memset:
>
> if (bytes < PAGE_SIZE)
> memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
> kunmap_local(kaddr);
I replaced the memset() with folio_zero_range() but incorrectly deleted
the kunmap_local().
The proper location of the kunmap_local() would be after the if
(copy_from_iter()) block.
Or even move the copy_from_iter() out of the if (), and immediately
kunmap(), then check the returned value.
Thanks a lot for the AI assistant review, which is better than my
expectation.
Qu
>
> But the new code appears to have lost the corresponding kunmap_local().
>
>> }
>> + ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);
>>
>> for (;;) {
>> ret = btrfs_wait_ordered_range(inode, start, num_bytes);
>> if (ret)
>> - goto out_folios;
>> + goto out_cb;
>> ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
>> start >> PAGE_SHIFT,
>> end >> PAGE_SHIFT);
>> if (ret)
>> - goto out_folios;
>> + goto out_cb;
>> btrfs_lock_extent(io_tree, start, end, &cached_state);
>> ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
>> if (!ordered &&
>> @@ -9962,7 +9970,8 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
>> encoded->unencoded_offset == 0 &&
>> can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
>> ret = __cow_file_range_inline(inode, encoded->len,
>> - orig_count, compression, folios[0],
>> + orig_count, compression,
>> + bio_first_folio_all(&cb->bbio.bio),
>> true);
>> if (ret <= 0) {
>> if (ret == 0)
>> @@ -10007,7 +10016,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
>>
>> btrfs_delalloc_release_extents(inode, num_bytes);
>>
>> - btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false);
>> + btrfs_submit_compressed_write(ordered, cb);
>> ret = orig_count;
>> goto out;
>>
>> @@ -10029,12 +10038,9 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
>> btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
>> out_unlock:
>> btrfs_unlock_extent(io_tree, start, end, &cached_state);
>> -out_folios:
>> - for (i = 0; i < nr_folios; i++) {
>> - if (folios[i])
>> - folio_put(folios[i]);
>> - }
>> - kvfree(folios);
>> +out_cb:
>> + if (cb)
>> + cleanup_compressed_bio(cb);
>> out:
>> if (ret >= 0)
>> iocb->ki_pos += encoded->len;
>> --
>> 2.52.0
>>
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH v3 8/9] btrfs: get rid of compressed_folios[] usage for encoded writes
2026-01-27 21:10 ` Qu Wenruo
@ 2026-01-27 21:27 ` Boris Burkov
2026-01-27 21:29 ` Qu Wenruo
0 siblings, 1 reply; 16+ messages in thread
From: Boris Burkov @ 2026-01-27 21:27 UTC (permalink / raw)
To: Qu Wenruo; +Cc: linux-btrfs
On Wed, Jan 28, 2026 at 07:40:23AM +1030, Qu Wenruo wrote:
>
>
> 在 2026/1/28 06:58, Boris Burkov 写道:
> > On Tue, Jan 27, 2026 at 01:40:41PM +1030, Qu Wenruo wrote:
> > > Currently only encoded writes utilized btrfs_submit_compressed_write(),
> > > which utilized compressed_bio::compressed_folios[] array.
> > >
> > > Change the only call site to call the new helper,
> > > btrfs_alloc_compressed_write(), to allocate a compressed bio, then queue
> > > needed folios into that bio, and finally call
> > > btrfs_submit_compressed_write() to submit the compressed bio.
> > >
> > > This change has one hidden benefit: previously we used
> > > btrfs_alloc_folio_array() for the folios of
> > > btrfs_submit_compressed_read(), which doesn't utilize the compression
> > > page pool for bs == ps cases.
> > >
> > > Now we call btrfs_alloc_compr_folio() which will benefit from page pool.
> > >
> > > The other obvious benefit is that we no longer need to allocate an array
> > > to hold all those folios, thus one less error path.
> >
> > This review is from claude using Chris's review prompts with some light
> > editing / checking by me.
>
> Wow, the AI review is better than I thought.
>
> Indeed caught two real and careless errors.
I have been impressed lately as well. The main reason I fired it up on
your patches was that it found several interesting bugs in my recent
work as well.
>
> [...]
> > > + if (bytes < min_folio_size)
> > > + folio_zero_range(folio, bytes, min_folio_size - bytes);
> > > + ret = bio_add_folio(&cb->bbio.bio, folio, folio_size(folio), 0);
> > > + if (!unlikely(ret)) {
> >
> > Should this be unlikely(!ret) instead of !unlikely(ret)?
>
> My bad, it should follow all the other sites to use if (unlikely(!ret)), but
> the heatwave makes my fingers slip.
>
> >
> > While !unlikely(ret) evaluates to the same boolean result as !ret, the
> > branch prediction hint is inverted.
> >
> > > + folio_put(folio);
> > > + ret = -EINVAL;
> > > + goto out_cb;
> > > }
> > > - if (bytes < PAGE_SIZE)
> > > - memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
> > > - kunmap_local(kaddr);
> >
> > Is there a missing kunmap_local(kaddr) here? The original code called
> > kunmap_local() after the memset:
> >
> > if (bytes < PAGE_SIZE)
> > memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
> > kunmap_local(kaddr);
>
> I replaced the memset() with folio_zero_range() but incorrectly deleted the
> kunmap_local().
>
> The proper location of the kunmap_local() would be after the if
> (copy_from_iter()) block.
> Or even move the copy_from_iter() out of the if (), and immediately
> kunmap(), then check the returned value.
>
> Thanks a lot for the AI assistant review, which is better than my
> expectation.
> Qu
>
> >
> > But the new code appears to have lost the corresponding kunmap_local().
> >
> > > }
> > > + ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);
> > > for (;;) {
> > > ret = btrfs_wait_ordered_range(inode, start, num_bytes);
> > > if (ret)
> > > - goto out_folios;
> > > + goto out_cb;
> > > ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
> > > start >> PAGE_SHIFT,
> > > end >> PAGE_SHIFT);
> > > if (ret)
> > > - goto out_folios;
> > > + goto out_cb;
> > > btrfs_lock_extent(io_tree, start, end, &cached_state);
> > > ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
> > > if (!ordered &&
> > > @@ -9962,7 +9970,8 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
> > > encoded->unencoded_offset == 0 &&
> > > can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
> > > ret = __cow_file_range_inline(inode, encoded->len,
> > > - orig_count, compression, folios[0],
> > > + orig_count, compression,
> > > + bio_first_folio_all(&cb->bbio.bio),
> > > true);
> > > if (ret <= 0) {
> > > if (ret == 0)
> > > @@ -10007,7 +10016,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
> > > btrfs_delalloc_release_extents(inode, num_bytes);
> > > - btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false);
> > > + btrfs_submit_compressed_write(ordered, cb);
> > > ret = orig_count;
> > > goto out;
> > > @@ -10029,12 +10038,9 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
> > > btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
> > > out_unlock:
> > > btrfs_unlock_extent(io_tree, start, end, &cached_state);
> > > -out_folios:
> > > - for (i = 0; i < nr_folios; i++) {
> > > - if (folios[i])
> > > - folio_put(folios[i]);
> > > - }
> > > - kvfree(folios);
> > > +out_cb:
> > > + if (cb)
> > > + cleanup_compressed_bio(cb);
> > > out:
> > > if (ret >= 0)
> > > iocb->ki_pos += encoded->len;
> > > --
> > > 2.52.0
> > >
>
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH v3 8/9] btrfs: get rid of compressed_folios[] usage for encoded writes
2026-01-27 21:27 ` Boris Burkov
@ 2026-01-27 21:29 ` Qu Wenruo
2026-01-27 21:49 ` Chris Mason
0 siblings, 1 reply; 16+ messages in thread
From: Qu Wenruo @ 2026-01-27 21:29 UTC (permalink / raw)
To: Boris Burkov, Qu Wenruo; +Cc: linux-btrfs
在 2026/1/28 07:57, Boris Burkov 写道:
> On Wed, Jan 28, 2026 at 07:40:23AM +1030, Qu Wenruo wrote:
[...]
>>
>> Wow, the AI review is better than I thought.
>>
>> Indeed caught two real and careless errors.
>
> I have been impressed lately as well. The main reason I fired it up on
> your patches was that it found several interesting bugs in my recent
> work as well.
>
Can we have a public bot doing this? Or will it easily exhaust the quota?
Thanks,
Qu
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH v3 8/9] btrfs: get rid of compressed_folios[] usage for encoded writes
2026-01-27 21:29 ` Qu Wenruo
@ 2026-01-27 21:49 ` Chris Mason
0 siblings, 0 replies; 16+ messages in thread
From: Chris Mason @ 2026-01-27 21:49 UTC (permalink / raw)
To: Qu Wenruo, Boris Burkov, Qu Wenruo; +Cc: linux-btrfs
On 1/27/26 4:29 PM, Qu Wenruo wrote:
>
>
> 在 2026/1/28 07:57, Boris Burkov 写道:
>> On Wed, Jan 28, 2026 at 07:40:23AM +1030, Qu Wenruo wrote:
> [...]
>>>
>>> Wow, the AI review is better than I thought.
>>>
>>> Indeed caught two real and careless errors.
>>
>> I have been impressed lately as well. The main reason I fired it up on
>> your patches was that it found several interesting bugs in my recent
>> work as well.
>>
>
> Can we have a public bot doing this? Or will it easily exhaust the quota?
That's definitely my goal, I want to get it running on any subsystem who
wants to sign up. Roman Gushchin and I are trying to find the most
effective way to get this going.
The prompts are in the github link below, they've been tested with
google and anthropic, but I expect the other major models also work.
https://github.com/masoncl/review-prompts
-chris
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH v3 8/9] btrfs: get rid of compressed_folios[] usage for encoded writes
2026-01-27 20:28 ` Boris Burkov
2026-01-27 21:10 ` Qu Wenruo
@ 2026-01-29 1:47 ` David Sterba
1 sibling, 0 replies; 16+ messages in thread
From: David Sterba @ 2026-01-29 1:47 UTC (permalink / raw)
To: Boris Burkov; +Cc: Qu Wenruo, linux-btrfs
On Tue, Jan 27, 2026 at 12:28:05PM -0800, Boris Burkov wrote:
> > + if (!unlikely(ret)) {
>
> Should this be unlikely(!ret) instead of !unlikely(ret)?
>
> While !unlikely(ret) evaluates to the same boolean result as !ret, the
> branch prediction hint is inverted.
Once identified such trivial patterns can be caught by coccinelle
scripts, I've added that one (https://github.com/btrfs/workflow), though
it does not seem to be 100% reliable. A "grep '!unlikely'" works.
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH v3 9/9] btrfs: get rid of compressed_bio::compressed_folios[]
2026-01-27 3:10 [PATCH v3 0/9] btrfs: used compressed_bio structure for read and write Qu Wenruo
` (7 preceding siblings ...)
2026-01-27 3:10 ` [PATCH v3 8/9] btrfs: get rid of compressed_folios[] usage for encoded writes Qu Wenruo
@ 2026-01-27 3:10 ` Qu Wenruo
8 siblings, 0 replies; 16+ messages in thread
From: Qu Wenruo @ 2026-01-27 3:10 UTC (permalink / raw)
To: linux-btrfs
Now there is no one utilizing that member, we can safely remove it along
with compressed_bio::nr_folios member.
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
fs/btrfs/compression.c | 1 -
fs/btrfs/compression.h | 6 ------
2 files changed, 7 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 205f6828c1e6..ebada0b64846 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -300,7 +300,6 @@ static void end_bbio_compressed_write(struct btrfs_bio *bbio)
/* Note, our inode could be gone now. */
bio_for_each_folio_all(fi, &bbio->bio)
btrfs_free_compr_folio(fi.folio);
- kfree(cb->compressed_folios);
bio_put(&cb->bbio.bio);
}
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 2d3a28b26997..65b8bc4bbe0b 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -42,12 +42,6 @@ static_assert((BTRFS_MAX_COMPRESSED % PAGE_SIZE) == 0);
#define BTRFS_ZLIB_DEFAULT_LEVEL 3
struct compressed_bio {
- /* Number of compressed folios in the array. */
- unsigned int nr_folios;
-
- /* The folios with the compressed data on them. */
- struct folio **compressed_folios;
-
/* starting offset in the inode for our pages */
u64 start;
--
2.52.0
^ permalink raw reply related [flat|nested] 16+ messages in thread