* [PATCH v2 0/2] replace BUG() and BUG_ON() with error handling
@ 2026-02-28 9:06 Adarsh Das
2026-02-28 9:06 ` [PATCH v2 1/2] btrfs: replace BUG() with error handling in compression.c Adarsh Das
2026-02-28 9:06 ` [PATCH v2 2/2] btrfs: replace BUG() and BUG_ON() with error handling in extent-tree.c Adarsh Das
0 siblings, 2 replies; 6+ messages in thread
From: Adarsh Das @ 2026-02-28 9:06 UTC (permalink / raw)
To: clm, dsterba; +Cc: terrelln, linux-btrfs, linux-kernel, Adarsh Das
v2:
- replace btrfs_err() + -EUCLEAN with ASSERT() for runtime logic bugs, as suggested by Qu Wenruo
- fold coding style fixes into main patches
Adarsh Das (2):
btrfs: replace BUG() with error handling in compression.c
btrfs: replace BUG() and BUG_ON() with error handling in extent-tree.c
fs/btrfs/compression.c | 74 ++++++++++++++----------------------------
fs/btrfs/delayed-ref.c | 8 +++--
fs/btrfs/extent-tree.c | 62 +++++++++++++++++------------------
3 files changed, 61 insertions(+), 83 deletions(-)
--
2.53.0
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH v2 1/2] btrfs: replace BUG() with error handling in compression.c
2026-02-28 9:06 [PATCH v2 0/2] replace BUG() and BUG_ON() with error handling Adarsh Das
@ 2026-02-28 9:06 ` Adarsh Das
2026-02-28 20:36 ` Qu Wenruo
2026-03-01 11:34 ` Filipe Manana
2026-02-28 9:06 ` [PATCH v2 2/2] btrfs: replace BUG() and BUG_ON() with error handling in extent-tree.c Adarsh Das
1 sibling, 2 replies; 6+ messages in thread
From: Adarsh Das @ 2026-02-28 9:06 UTC (permalink / raw)
To: clm, dsterba; +Cc: terrelln, linux-btrfs, linux-kernel, Adarsh Das
v2:
- use ASSERT() instead of btrfs_err() + -EUCLEAN
- remove default: branches and add upfront ASSERT() for type validation
- fold coding style fixes into this patch
Signed-off-by: Adarsh Das <adarshdas950@gmail.com>
---
fs/btrfs/compression.c | 74 ++++++++++++++----------------------------
1 file changed, 25 insertions(+), 49 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 790518a8c803..0d8da8ce5fd3 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -36,9 +36,9 @@
static struct bio_set btrfs_compressed_bioset;
-static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
+static const char * const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
-const char* btrfs_compress_type2str(enum btrfs_compression_type type)
+const char *btrfs_compress_type2str(enum btrfs_compression_type type)
{
switch (type) {
case BTRFS_COMPRESS_ZLIB:
@@ -89,24 +89,21 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len)
static int compression_decompress_bio(struct list_head *ws,
struct compressed_bio *cb)
{
+ ASSERT(cb->compress_type > BTRFS_COMPRESS_NONE &&
+ cb->compress_type < BTRFS_NR_COMPRESS_TYPES);
switch (cb->compress_type) {
case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
- case BTRFS_COMPRESS_NONE:
- default:
- /*
- * This can't happen, the type is validated several times
- * before we get here.
- */
- BUG();
}
+ return -EUCLEAN;
}
static int compression_decompress(int type, struct list_head *ws,
const u8 *data_in, struct folio *dest_folio,
unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
+ ASSERT(type > BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
switch (type) {
case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_folio,
dest_pgoff, srclen, destlen);
@@ -114,14 +111,8 @@ static int compression_decompress(int type, struct list_head *ws,
dest_pgoff, srclen, destlen);
case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_folio,
dest_pgoff, srclen, destlen);
- case BTRFS_COMPRESS_NONE:
- default:
- /*
- * This can't happen, the type is validated several times
- * before we get here.
- */
- BUG();
}
+ return -EUCLEAN;
}
static int btrfs_decompress_bio(struct compressed_bio *cb);
@@ -484,6 +475,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (zero_offset) {
int zeros;
+
zeros = folio_size(folio) - zero_offset;
folio_zero_range(folio, zero_offset, zeros);
}
@@ -697,33 +689,25 @@ static const struct btrfs_compress_levels * const btrfs_compress_levels[] = {
static struct list_head *alloc_workspace(struct btrfs_fs_info *fs_info, int type, int level)
{
+
+ ASSERT(type >= BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
switch (type) {
case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(fs_info);
case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(fs_info, level);
case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(fs_info);
case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(fs_info, level);
- default:
- /*
- * This can't happen, the type is validated several times
- * before we get here.
- */
- BUG();
}
+ return ERR_PTR(-EUCLEAN);
}
static void free_workspace(int type, struct list_head *ws)
{
+ ASSERT(type >= BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
switch (type) {
case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
- default:
- /*
- * This can't happen, the type is validated several times
- * before we get here.
- */
- BUG();
}
}
@@ -792,7 +776,7 @@ struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, i
struct workspace_manager *wsm = fs_info->compr_wsm[type];
struct list_head *workspace;
int cpus = num_online_cpus();
- unsigned nofs_flag;
+ unsigned int nofs_flag;
struct list_head *idle_ws;
spinlock_t *ws_lock;
atomic_t *total_ws;
@@ -868,18 +852,14 @@ struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, i
static struct list_head *get_workspace(struct btrfs_fs_info *fs_info, int type, int level)
{
+ ASSERT(type >= BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
switch (type) {
case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(fs_info, type, level);
case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(fs_info, level);
case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(fs_info, type, level);
case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(fs_info, level);
- default:
- /*
- * This can't happen, the type is validated several times
- * before we get here.
- */
- BUG();
}
+ return ERR_PTR(-EUCLEAN);
}
/*
@@ -919,17 +899,12 @@ void btrfs_put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_he
static void put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_head *ws)
{
+ ASSERT(type >= BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
switch (type) {
case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(fs_info, type, ws);
case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(fs_info, type, ws);
case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(fs_info, type, ws);
case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(fs_info, ws);
- default:
- /*
- * This can't happen, the type is validated several times
- * before we get here.
- */
- BUG();
}
}
@@ -1181,17 +1156,17 @@ static u64 file_offset_from_bvec(const struct bio_vec *bvec)
* @buf: The decompressed data buffer
* @buf_len: The decompressed data length
* @decompressed: Number of bytes that are already decompressed inside the
- * compressed extent
+ * compressed extent
* @cb: The compressed extent descriptor
* @orig_bio: The original bio that the caller wants to read for
*
* An easier to understand graph is like below:
*
- * |<- orig_bio ->| |<- orig_bio->|
- * |<------- full decompressed extent ----->|
- * |<----------- @cb range ---->|
- * | |<-- @buf_len -->|
- * |<--- @decompressed --->|
+ * |<- orig_bio ->| |<- orig_bio->|
+ * |<------- full decompressed extent ----->|
+ * |<----------- @cb range ---->|
+ * | |<-- @buf_len -->|
+ * |<--- @decompressed --->|
*
* Note that, @cb can be a subpage of the full decompressed extent, but
* @cb->start always has the same as the orig_file_offset value of the full
@@ -1313,7 +1288,8 @@ static u32 shannon_entropy(struct heuristic_ws *ws)
#define RADIX_BASE 4U
#define COUNTERS_SIZE (1U << RADIX_BASE)
-static u8 get4bits(u64 num, int shift) {
+static u8 get4bits(u64 num, int shift)
+{
u8 low4bits;
num >>= shift;
@@ -1388,7 +1364,7 @@ static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
*/
memset(counters, 0, sizeof(counters));
- for (i = 0; i < num; i ++) {
+ for (i = 0; i < num; i++) {
buf_num = array_buf[i].count;
addr = get4bits(buf_num, shift);
counters[addr]++;
--
2.53.0
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH v2 2/2] btrfs: replace BUG() and BUG_ON() with error handling in extent-tree.c
2026-02-28 9:06 [PATCH v2 0/2] replace BUG() and BUG_ON() with error handling Adarsh Das
2026-02-28 9:06 ` [PATCH v2 1/2] btrfs: replace BUG() with error handling in compression.c Adarsh Das
@ 2026-02-28 9:06 ` Adarsh Das
2026-02-28 20:37 ` Qu Wenruo
1 sibling, 1 reply; 6+ messages in thread
From: Adarsh Das @ 2026-02-28 9:06 UTC (permalink / raw)
To: clm, dsterba; +Cc: terrelln, linux-btrfs, linux-kernel, Adarsh Das
v2:
- use ASSERT() instead of btrfs_err() + -EUCLEAN
- add ASSERTs in btrfs_add_delayed_data_ref() and btrfs_add_delayed_tree_ref() to validate the action at insertion time instead of at run time
- fold coding style fixes into this patch
Signed-off-by: Adarsh Das <adarshdas950@gmail.com>
---
fs/btrfs/delayed-ref.c | 8 ++++--
fs/btrfs/extent-tree.c | 62 ++++++++++++++++++++----------------------
2 files changed, 36 insertions(+), 34 deletions(-)
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 3766ff29fbbb..d308c70228af 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -1113,7 +1113,9 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref,
struct btrfs_delayed_extent_op *extent_op)
{
- ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
+ ASSERT(generic_ref->type == BTRFS_REF_METADATA &&
+ (generic_ref->action == BTRFS_ADD_DELAYED_REF ||
+ generic_ref->action == BTRFS_DROP_DELAYED_REF));
return add_delayed_ref(trans, generic_ref, extent_op, 0);
}
@@ -1124,7 +1126,9 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref,
u64 reserved)
{
- ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
+ ASSERT(generic_ref->type == BTRFS_REF_DATA &&
+ (generic_ref->action == BTRFS_ADD_DELAYED_REF ||
+ generic_ref->action == BTRFS_DROP_DELAYED_REF));
return add_delayed_ref(trans, generic_ref, NULL, reserved);
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 03cf9f242c70..98bdf51774c4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -604,7 +604,7 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
return -EUCLEAN;
}
- BUG_ON(num_refs < refs_to_drop);
+ ASSERT(num_refs >= refs_to_drop);
num_refs -= refs_to_drop;
if (num_refs == 0) {
@@ -863,7 +863,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
ptr += sizeof(struct btrfs_tree_block_info);
- BUG_ON(ptr > end);
+ ASSERT(ptr <= end);
}
if (owner >= BTRFS_FIRST_FREE_OBJECTID)
@@ -1237,7 +1237,7 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
{
int ret = 0;
- BUG_ON(!is_data && refs_to_drop != 1);
+ ASSERT(is_data || refs_to_drop == 1);
if (iref)
ret = update_inline_extent_backref(trans, path, iref,
-refs_to_drop, NULL);
@@ -1451,10 +1451,9 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
- ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
- generic_ref->action);
- BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
- generic_ref->ref_root == BTRFS_TREE_LOG_OBJECTID);
+ ASSERT(generic_ref->type != BTRFS_REF_NOT_SET && generic_ref->action);
+ ASSERT(generic_ref->type != BTRFS_REF_METADATA ||
+ generic_ref->ref_root != BTRFS_TREE_LOG_OBJECTID);
if (generic_ref->type == BTRFS_REF_METADATA)
ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
@@ -1621,8 +1620,6 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
ret = __btrfs_inc_extent_ref(trans, node, extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
ret = __btrfs_free_extent(trans, href, node, extent_op);
- } else {
- BUG();
}
return ret;
}
@@ -1639,7 +1636,7 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
if (extent_op->update_key) {
struct btrfs_tree_block_info *bi;
- BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
+ ASSERT(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK);
bi = (struct btrfs_tree_block_info *)(ei + 1);
btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
}
@@ -1774,8 +1771,6 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
ret = drop_remap_tree_ref(trans, node);
else
ret = __btrfs_free_extent(trans, href, node, extent_op);
- } else {
- BUG();
}
return ret;
}
@@ -2088,7 +2083,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
* head
*/
ret = cleanup_ref_head(trans, locked_ref, &bytes_processed);
- if (ret > 0 ) {
+ if (ret > 0) {
/* We dropped our lock, we need to loop. */
ret = 0;
continue;
@@ -2645,7 +2640,7 @@ int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes
struct btrfs_block_group *cache;
cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
- BUG_ON(!cache); /* Logic error */
+ ASSERT(cache);
pin_down_extent(trans, cache, bytenr, num_bytes, true);
@@ -4119,20 +4114,25 @@ static int do_allocation(struct btrfs_block_group *block_group,
struct find_free_extent_ctl *ffe_ctl,
struct btrfs_block_group **bg_ret)
{
+ ASSERT(ffe_ctl->policy == BTRFS_EXTENT_ALLOC_CLUSTERED ||
+ ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED);
switch (ffe_ctl->policy) {
case BTRFS_EXTENT_ALLOC_CLUSTERED:
return do_allocation_clustered(block_group, ffe_ctl, bg_ret);
case BTRFS_EXTENT_ALLOC_ZONED:
return do_allocation_zoned(block_group, ffe_ctl, bg_ret);
- default:
- BUG();
}
+ return -EUCLEAN;
}
static void release_block_group(struct btrfs_block_group *block_group,
struct find_free_extent_ctl *ffe_ctl,
bool delalloc)
{
+ ASSERT(btrfs_bg_flags_to_raid_index(block_group->flags) ==
+ ffe_ctl->index);
+ ASSERT(ffe_ctl->policy == BTRFS_EXTENT_ALLOC_CLUSTERED ||
+ ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED);
switch (ffe_ctl->policy) {
case BTRFS_EXTENT_ALLOC_CLUSTERED:
ffe_ctl->retry_uncached = false;
@@ -4140,12 +4140,8 @@ static void release_block_group(struct btrfs_block_group *block_group,
case BTRFS_EXTENT_ALLOC_ZONED:
/* Nothing to do */
break;
- default:
- BUG();
}
- BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
- ffe_ctl->index);
btrfs_release_block_group(block_group, delalloc);
}
@@ -4164,6 +4160,8 @@ static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl,
static void found_extent(struct find_free_extent_ctl *ffe_ctl,
struct btrfs_key *ins)
{
+ ASSERT(ffe_ctl->policy == BTRFS_EXTENT_ALLOC_CLUSTERED ||
+ ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED);
switch (ffe_ctl->policy) {
case BTRFS_EXTENT_ALLOC_CLUSTERED:
found_extent_clustered(ffe_ctl, ins);
@@ -4171,8 +4169,6 @@ static void found_extent(struct find_free_extent_ctl *ffe_ctl,
case BTRFS_EXTENT_ALLOC_ZONED:
/* Nothing to do */
break;
- default:
- BUG();
}
}
@@ -4232,14 +4228,15 @@ static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info,
static int can_allocate_chunk(struct btrfs_fs_info *fs_info,
struct find_free_extent_ctl *ffe_ctl)
{
+ ASSERT(ffe_ctl->policy == BTRFS_EXTENT_ALLOC_CLUSTERED ||
+ ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED);
switch (ffe_ctl->policy) {
case BTRFS_EXTENT_ALLOC_CLUSTERED:
return 0;
case BTRFS_EXTENT_ALLOC_ZONED:
return can_allocate_chunk_zoned(fs_info, ffe_ctl);
- default:
- BUG();
}
+ return -EUCLEAN;
}
/*
@@ -4310,8 +4307,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
if (ret == -ENOSPC) {
ret = 0;
ffe_ctl->loop++;
- }
- else if (ret < 0)
+ } else if (ret < 0)
btrfs_abort_transaction(trans, ret);
else
ret = 0;
@@ -4441,15 +4437,16 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
struct btrfs_key *ins)
{
+ ASSERT(ffe_ctl->policy == BTRFS_EXTENT_ALLOC_CLUSTERED ||
+ ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED);
switch (ffe_ctl->policy) {
case BTRFS_EXTENT_ALLOC_CLUSTERED:
return prepare_allocation_clustered(fs_info, ffe_ctl,
space_info, ins);
case BTRFS_EXTENT_ALLOC_ZONED:
return prepare_allocation_zoned(fs_info, ffe_ctl, space_info);
- default:
- BUG();
}
+ return -EUCLEAN;
}
/*
@@ -5260,6 +5257,8 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
u64 owning_root;
+ ASSERT(parent <= 0);
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
if (btrfs_is_testing(fs_info)) {
buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
@@ -5292,8 +5291,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
parent = ins.objectid;
flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
owning_root = reloc_src_root;
- } else
- BUG_ON(parent > 0);
+ }
if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
struct btrfs_delayed_extent_op *extent_op;
@@ -5633,7 +5631,7 @@ static int check_ref_exists(struct btrfs_trans_handle *trans,
* If we get 0 then we found our reference, return 1, else
* return the error if it's not -ENOENT;
*/
- return (ret < 0 ) ? ret : 1;
+ return (ret < 0) ? ret : 1;
}
/*
@@ -6437,7 +6435,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
int parent_level;
int ret = 0;
- BUG_ON(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
+ ASSERT(btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID);
path = btrfs_alloc_path();
if (!path)
--
2.53.0
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH v2 1/2] btrfs: replace BUG() with error handling in compression.c
2026-02-28 9:06 ` [PATCH v2 1/2] btrfs: replace BUG() with error handling in compression.c Adarsh Das
@ 2026-02-28 20:36 ` Qu Wenruo
2026-03-01 11:34 ` Filipe Manana
1 sibling, 0 replies; 6+ messages in thread
From: Qu Wenruo @ 2026-02-28 20:36 UTC (permalink / raw)
To: Adarsh Das, clm, dsterba; +Cc: terrelln, linux-btrfs, linux-kernel
在 2026/2/28 19:36, Adarsh Das 写道:
> v2:
> - use ASSERT() instead of btrfs_err() + -EUCLEAN
> - remove default: branches and add upfront ASSERT() for type validation
> - fold coding style fixes into this patch
Please put a proper commit message here rather than just the changelog.
Especially since you have already put a changelog in the cover letter, there is
no need to repeat it again in each patch.
>
> Signed-off-by: Adarsh Das <adarshdas950@gmail.com>
> ---
> fs/btrfs/compression.c | 74 ++++++++++++++----------------------------
> 1 file changed, 25 insertions(+), 49 deletions(-)
>
> diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
> index 790518a8c803..0d8da8ce5fd3 100644
> --- a/fs/btrfs/compression.c
> +++ b/fs/btrfs/compression.c
> @@ -36,9 +36,9 @@
>
> static struct bio_set btrfs_compressed_bioset;
>
> -static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
> +static const char * const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
>
> -const char* btrfs_compress_type2str(enum btrfs_compression_type type)
> +const char *btrfs_compress_type2str(enum btrfs_compression_type type)
> {
> switch (type) {
> case BTRFS_COMPRESS_ZLIB:
> @@ -89,24 +89,21 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len)
> static int compression_decompress_bio(struct list_head *ws,
> struct compressed_bio *cb)
> {
> + ASSERT(cb->compress_type > BTRFS_COMPRESS_NONE &&
> + cb->compress_type < BTRFS_NR_COMPRESS_TYPES);
> switch (cb->compress_type) {
> case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
> case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
> case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
> - case BTRFS_COMPRESS_NONE:
> - default:
> - /*
> - * This can't happen, the type is validated several times
> - * before we get here.
> - */
> - BUG();
> }
> + return -EUCLEAN;
> }
>
> static int compression_decompress(int type, struct list_head *ws,
> const u8 *data_in, struct folio *dest_folio,
> unsigned long dest_pgoff, size_t srclen, size_t destlen)
> {
> + ASSERT(type > BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
> switch (type) {
> case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_folio,
> dest_pgoff, srclen, destlen);
> @@ -114,14 +111,8 @@ static int compression_decompress(int type, struct list_head *ws,
> dest_pgoff, srclen, destlen);
> case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_folio,
> dest_pgoff, srclen, destlen);
> - case BTRFS_COMPRESS_NONE:
> - default:
> - /*
> - * This can't happen, the type is validated several times
> - * before we get here.
> - */
> - BUG();
> }
> + return -EUCLEAN;
> }
>
> static int btrfs_decompress_bio(struct compressed_bio *cb);
> @@ -484,6 +475,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
>
> if (zero_offset) {
> int zeros;
> +
> zeros = folio_size(folio) - zero_offset;
> folio_zero_range(folio, zero_offset, zeros);
> }
> @@ -697,33 +689,25 @@ static const struct btrfs_compress_levels * const btrfs_compress_levels[] = {
>
> static struct list_head *alloc_workspace(struct btrfs_fs_info *fs_info, int type, int level)
> {
> +
> + ASSERT(type >= BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
> switch (type) {
> case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(fs_info);
> case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(fs_info, level);
> case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(fs_info);
> case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(fs_info, level);
> - default:
> - /*
> - * This can't happen, the type is validated several times
> - * before we get here.
> - */
> - BUG();
> }
> + return ERR_PTR(-EUCLEAN);
> }
>
> static void free_workspace(int type, struct list_head *ws)
> {
> + ASSERT(type >= BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
> switch (type) {
> case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
> case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
> case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
> case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
> - default:
> - /*
> - * This can't happen, the type is validated several times
> - * before we get here.
> - */
> - BUG();
> }
> }
>
> @@ -792,7 +776,7 @@ struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, i
> struct workspace_manager *wsm = fs_info->compr_wsm[type];
> struct list_head *workspace;
> int cpus = num_online_cpus();
> - unsigned nofs_flag;
> + unsigned int nofs_flag;
> struct list_head *idle_ws;
> spinlock_t *ws_lock;
> atomic_t *total_ws;
> @@ -868,18 +852,14 @@ struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, i
>
> static struct list_head *get_workspace(struct btrfs_fs_info *fs_info, int type, int level)
> {
> + ASSERT(type >= BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
> switch (type) {
> case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(fs_info, type, level);
> case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(fs_info, level);
> case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(fs_info, type, level);
> case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(fs_info, level);
> - default:
> - /*
> - * This can't happen, the type is validated several times
> - * before we get here.
> - */
> - BUG();
> }
> + return ERR_PTR(-EUCLEAN);
> }
>
> /*
> @@ -919,17 +899,12 @@ void btrfs_put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_he
>
> static void put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_head *ws)
> {
> + ASSERT(type >= BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
> switch (type) {
> case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(fs_info, type, ws);
> case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(fs_info, type, ws);
> case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(fs_info, type, ws);
> case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(fs_info, ws);
> - default:
> - /*
> - * This can't happen, the type is validated several times
> - * before we get here.
> - */
> - BUG();
> }
> }
>
> @@ -1181,17 +1156,17 @@ static u64 file_offset_from_bvec(const struct bio_vec *bvec)
> * @buf: The decompressed data buffer
> * @buf_len: The decompressed data length
> * @decompressed: Number of bytes that are already decompressed inside the
> - * compressed extent
> + * compressed extent
> * @cb: The compressed extent descriptor
> * @orig_bio: The original bio that the caller wants to read for
> *
> * An easier to understand graph is like below:
> *
> - * |<- orig_bio ->| |<- orig_bio->|
> - * |<------- full decompressed extent ----->|
> - * |<----------- @cb range ---->|
> - * | |<-- @buf_len -->|
> - * |<--- @decompressed --->|
> + * |<- orig_bio ->| |<- orig_bio->|
> + * |<------- full decompressed extent ----->|
> + * |<----------- @cb range ---->|
> + * | |<-- @buf_len -->|
> + * |<--- @decompressed --->|
> *
> * Note that, @cb can be a subpage of the full decompressed extent, but
> * @cb->start always has the same as the orig_file_offset value of the full
> @@ -1313,7 +1288,8 @@ static u32 shannon_entropy(struct heuristic_ws *ws)
> #define RADIX_BASE 4U
> #define COUNTERS_SIZE (1U << RADIX_BASE)
>
> -static u8 get4bits(u64 num, int shift) {
> +static u8 get4bits(u64 num, int shift)
> +{
> u8 low4bits;
>
> num >>= shift;
> @@ -1388,7 +1364,7 @@ static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
> */
> memset(counters, 0, sizeof(counters));
>
> - for (i = 0; i < num; i ++) {
> + for (i = 0; i < num; i++) {
> buf_num = array_buf[i].count;
> addr = get4bits(buf_num, shift);
> counters[addr]++;
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH v2 2/2] btrfs: replace BUG() and BUG_ON() with error handling in extent-tree.c
2026-02-28 9:06 ` [PATCH v2 2/2] btrfs: replace BUG() and BUG_ON() with error handling in extent-tree.c Adarsh Das
@ 2026-02-28 20:37 ` Qu Wenruo
0 siblings, 0 replies; 6+ messages in thread
From: Qu Wenruo @ 2026-02-28 20:37 UTC (permalink / raw)
To: Adarsh Das, clm, dsterba; +Cc: terrelln, linux-btrfs, linux-kernel
在 2026/2/28 19:36, Adarsh Das 写道:
> v2:
> - use ASSERT() instead of btrfs_err() + -EUCLEAN
> - append ASSERTs in btrfs_add_delayed_data_ref() and btrfs_add_delayed_tree_ref() to validate action at insertion time instead of runtime
> - fold coding style fixes into this patch
The same problem as patch 1.
>
> Signed-off-by: Adarsh Das <adarshdas950@gmail.com>
> ---
> fs/btrfs/delayed-ref.c | 8 ++++--
> fs/btrfs/extent-tree.c | 62 ++++++++++++++++++++----------------------
> 2 files changed, 36 insertions(+), 34 deletions(-)
>
> diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
> index 3766ff29fbbb..d308c70228af 100644
> --- a/fs/btrfs/delayed-ref.c
> +++ b/fs/btrfs/delayed-ref.c
> @@ -1113,7 +1113,9 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
> struct btrfs_ref *generic_ref,
> struct btrfs_delayed_extent_op *extent_op)
> {
> - ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
> + ASSERT(generic_ref->type == BTRFS_REF_METADATA &&
> + (generic_ref->action == BTRFS_ADD_DELAYED_REF ||
> + generic_ref->action == BTRFS_DROP_DELAYED_REF));
> return add_delayed_ref(trans, generic_ref, extent_op, 0);
> }
>
> @@ -1124,7 +1126,9 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
> struct btrfs_ref *generic_ref,
> u64 reserved)
> {
> - ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
> + ASSERT(generic_ref->type == BTRFS_REF_DATA &&
> + (generic_ref->action == BTRFS_ADD_DELAYED_REF ||
> + generic_ref->action == BTRFS_DROP_DELAYED_REF));
> return add_delayed_ref(trans, generic_ref, NULL, reserved);
> }
>
> diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
> index 03cf9f242c70..98bdf51774c4 100644
> --- a/fs/btrfs/extent-tree.c
> +++ b/fs/btrfs/extent-tree.c
> @@ -604,7 +604,7 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
> return -EUCLEAN;
> }
>
> - BUG_ON(num_refs < refs_to_drop);
> + ASSERT(num_refs >= refs_to_drop);
> num_refs -= refs_to_drop;
>
> if (num_refs == 0) {
> @@ -863,7 +863,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
>
> if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
> ptr += sizeof(struct btrfs_tree_block_info);
> - BUG_ON(ptr > end);
> + ASSERT(ptr <= end);
> }
>
> if (owner >= BTRFS_FIRST_FREE_OBJECTID)
> @@ -1237,7 +1237,7 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
> {
> int ret = 0;
>
> - BUG_ON(!is_data && refs_to_drop != 1);
> + ASSERT(is_data || refs_to_drop == 1);
> if (iref)
> ret = update_inline_extent_backref(trans, path, iref,
> -refs_to_drop, NULL);
> @@ -1451,10 +1451,9 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
> struct btrfs_fs_info *fs_info = trans->fs_info;
> int ret;
>
> - ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
> - generic_ref->action);
> - BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
> - generic_ref->ref_root == BTRFS_TREE_LOG_OBJECTID);
> + ASSERT(generic_ref->type != BTRFS_REF_NOT_SET && generic_ref->action);
> + ASSERT(generic_ref->type != BTRFS_REF_METADATA ||
> + generic_ref->ref_root != BTRFS_TREE_LOG_OBJECTID);
>
> if (generic_ref->type == BTRFS_REF_METADATA)
> ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
> @@ -1621,8 +1620,6 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
> ret = __btrfs_inc_extent_ref(trans, node, extent_op);
> } else if (node->action == BTRFS_DROP_DELAYED_REF) {
> ret = __btrfs_free_extent(trans, href, node, extent_op);
> - } else {
> - BUG();
> }
> return ret;
> }
> @@ -1639,7 +1636,7 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
>
> if (extent_op->update_key) {
> struct btrfs_tree_block_info *bi;
> - BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
> + ASSERT(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK);
> bi = (struct btrfs_tree_block_info *)(ei + 1);
> btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
> }
> @@ -1774,8 +1771,6 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
> ret = drop_remap_tree_ref(trans, node);
> else
> ret = __btrfs_free_extent(trans, href, node, extent_op);
> - } else {
> - BUG();
> }
> return ret;
> }
> @@ -2088,7 +2083,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
> * head
> */
> ret = cleanup_ref_head(trans, locked_ref, &bytes_processed);
> - if (ret > 0 ) {
> + if (ret > 0) {
> /* We dropped our lock, we need to loop. */
> ret = 0;
> continue;
> @@ -2645,7 +2640,7 @@ int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes
> struct btrfs_block_group *cache;
>
> cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
> - BUG_ON(!cache); /* Logic error */
> + ASSERT(cache);
>
> pin_down_extent(trans, cache, bytenr, num_bytes, true);
>
> @@ -4119,20 +4114,25 @@ static int do_allocation(struct btrfs_block_group *block_group,
> struct find_free_extent_ctl *ffe_ctl,
> struct btrfs_block_group **bg_ret)
> {
> + ASSERT(ffe_ctl->policy == BTRFS_EXTENT_ALLOC_CLUSTERED ||
> + ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED);
> switch (ffe_ctl->policy) {
> case BTRFS_EXTENT_ALLOC_CLUSTERED:
> return do_allocation_clustered(block_group, ffe_ctl, bg_ret);
> case BTRFS_EXTENT_ALLOC_ZONED:
> return do_allocation_zoned(block_group, ffe_ctl, bg_ret);
> - default:
> - BUG();
> }
> + return -EUCLEAN;
> }
>
> static void release_block_group(struct btrfs_block_group *block_group,
> struct find_free_extent_ctl *ffe_ctl,
> bool delalloc)
> {
> + ASSERT(btrfs_bg_flags_to_raid_index(block_group->flags) ==
> + ffe_ctl->index);
> + ASSERT(ffe_ctl->policy == BTRFS_EXTENT_ALLOC_CLUSTERED ||
> + ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED);
> switch (ffe_ctl->policy) {
> case BTRFS_EXTENT_ALLOC_CLUSTERED:
> ffe_ctl->retry_uncached = false;
> @@ -4140,12 +4140,8 @@ static void release_block_group(struct btrfs_block_group *block_group,
> case BTRFS_EXTENT_ALLOC_ZONED:
> /* Nothing to do */
> break;
> - default:
> - BUG();
> }
>
> - BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
> - ffe_ctl->index);
> btrfs_release_block_group(block_group, delalloc);
> }
>
> @@ -4164,6 +4160,8 @@ static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl,
> static void found_extent(struct find_free_extent_ctl *ffe_ctl,
> struct btrfs_key *ins)
> {
> + ASSERT(ffe_ctl->policy == BTRFS_EXTENT_ALLOC_CLUSTERED ||
> + ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED);
> switch (ffe_ctl->policy) {
> case BTRFS_EXTENT_ALLOC_CLUSTERED:
> found_extent_clustered(ffe_ctl, ins);
> @@ -4171,8 +4169,6 @@ static void found_extent(struct find_free_extent_ctl *ffe_ctl,
> case BTRFS_EXTENT_ALLOC_ZONED:
> /* Nothing to do */
> break;
> - default:
> - BUG();
> }
> }
>
> @@ -4232,14 +4228,15 @@ static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info,
> static int can_allocate_chunk(struct btrfs_fs_info *fs_info,
> struct find_free_extent_ctl *ffe_ctl)
> {
> + ASSERT(ffe_ctl->policy == BTRFS_EXTENT_ALLOC_CLUSTERED ||
> + ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED);
> switch (ffe_ctl->policy) {
> case BTRFS_EXTENT_ALLOC_CLUSTERED:
> return 0;
> case BTRFS_EXTENT_ALLOC_ZONED:
> return can_allocate_chunk_zoned(fs_info, ffe_ctl);
> - default:
> - BUG();
> }
> + return -EUCLEAN;
> }
>
> /*
> @@ -4310,8 +4307,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
> if (ret == -ENOSPC) {
> ret = 0;
> ffe_ctl->loop++;
> - }
> - else if (ret < 0)
> + } else if (ret < 0)
> btrfs_abort_transaction(trans, ret);
> else
> ret = 0;
> @@ -4441,15 +4437,16 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
> struct btrfs_space_info *space_info,
> struct btrfs_key *ins)
> {
> + ASSERT(ffe_ctl->policy == BTRFS_EXTENT_ALLOC_CLUSTERED ||
> + ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED);
> switch (ffe_ctl->policy) {
> case BTRFS_EXTENT_ALLOC_CLUSTERED:
> return prepare_allocation_clustered(fs_info, ffe_ctl,
> space_info, ins);
> case BTRFS_EXTENT_ALLOC_ZONED:
> return prepare_allocation_zoned(fs_info, ffe_ctl, space_info);
> - default:
> - BUG();
> }
> + return -EUCLEAN;
> }
>
> /*
> @@ -5260,6 +5257,8 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
> bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
> u64 owning_root;
>
> + ASSERT(parent <= 0);
> +
> #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
> if (btrfs_is_testing(fs_info)) {
> buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
> @@ -5292,8 +5291,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
> parent = ins.objectid;
> flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
> owning_root = reloc_src_root;
> - } else
> - BUG_ON(parent > 0);
> + }
>
> if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
> struct btrfs_delayed_extent_op *extent_op;
> @@ -5633,7 +5631,7 @@ static int check_ref_exists(struct btrfs_trans_handle *trans,
> * If we get 0 then we found our reference, return 1, else
> * return the error if it's not -ENOENT;
> */
> - return (ret < 0 ) ? ret : 1;
> + return (ret < 0) ? ret : 1;
> }
>
> /*
> @@ -6437,7 +6435,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
> int parent_level;
> int ret = 0;
>
> - BUG_ON(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
> + ASSERT(btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID);
>
> path = btrfs_alloc_path();
> if (!path)
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH v2 1/2] btrfs: replace BUG() with error handling in compression.c
2026-02-28 9:06 ` [PATCH v2 1/2] btrfs: replace BUG() with error handling in compression.c Adarsh Das
2026-02-28 20:36 ` Qu Wenruo
@ 2026-03-01 11:34 ` Filipe Manana
1 sibling, 0 replies; 6+ messages in thread
From: Filipe Manana @ 2026-03-01 11:34 UTC (permalink / raw)
To: Adarsh Das; +Cc: clm, dsterba, terrelln, linux-btrfs, linux-kernel
On Sat, Feb 28, 2026 at 9:07 AM Adarsh Das <adarshdas950@gmail.com> wrote:
>
> v2:
> - use ASSERT() instead of btrfs_err() + -EUCLEAN
> - remove default: branches and add upfront ASSERT() for type validation
> - fold coding style fixes into this patch
>
> Signed-off-by: Adarsh Das <adarshdas950@gmail.com>
> ---
> fs/btrfs/compression.c | 74 ++++++++++++++----------------------------
> 1 file changed, 25 insertions(+), 49 deletions(-)
>
> diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
> index 790518a8c803..0d8da8ce5fd3 100644
> --- a/fs/btrfs/compression.c
> +++ b/fs/btrfs/compression.c
> @@ -36,9 +36,9 @@
>
> static struct bio_set btrfs_compressed_bioset;
>
> -static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
> +static const char * const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
Please don't do that.
You are changing a lot of lines in this patch, and the next one, just
to change coding style.
We don't do that in btrfs: we only fix the coding style of a line when
we need to change it anyway due to a bug fix, refactoring, cleanup,
implementing something new, etc.
Also don't send patches to fix only coding style.
Thanks.
>
> -const char* btrfs_compress_type2str(enum btrfs_compression_type type)
> +const char *btrfs_compress_type2str(enum btrfs_compression_type type)
> {
> switch (type) {
> case BTRFS_COMPRESS_ZLIB:
> @@ -89,24 +89,21 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len)
> static int compression_decompress_bio(struct list_head *ws,
> struct compressed_bio *cb)
> {
> + ASSERT(cb->compress_type > BTRFS_COMPRESS_NONE &&
> + cb->compress_type < BTRFS_NR_COMPRESS_TYPES);
> switch (cb->compress_type) {
> case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
> case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
> case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
> - case BTRFS_COMPRESS_NONE:
> - default:
> - /*
> - * This can't happen, the type is validated several times
> - * before we get here.
> - */
> - BUG();
> }
> + return -EUCLEAN;
> }
>
> static int compression_decompress(int type, struct list_head *ws,
> const u8 *data_in, struct folio *dest_folio,
> unsigned long dest_pgoff, size_t srclen, size_t destlen)
> {
> + ASSERT(type > BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
> switch (type) {
> case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_folio,
> dest_pgoff, srclen, destlen);
> @@ -114,14 +111,8 @@ static int compression_decompress(int type, struct list_head *ws,
> dest_pgoff, srclen, destlen);
> case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_folio,
> dest_pgoff, srclen, destlen);
> - case BTRFS_COMPRESS_NONE:
> - default:
> - /*
> - * This can't happen, the type is validated several times
> - * before we get here.
> - */
> - BUG();
> }
> + return -EUCLEAN;
> }
>
> static int btrfs_decompress_bio(struct compressed_bio *cb);
> @@ -484,6 +475,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
>
> if (zero_offset) {
> int zeros;
> +
> zeros = folio_size(folio) - zero_offset;
> folio_zero_range(folio, zero_offset, zeros);
> }
> @@ -697,33 +689,25 @@ static const struct btrfs_compress_levels * const btrfs_compress_levels[] = {
>
> static struct list_head *alloc_workspace(struct btrfs_fs_info *fs_info, int type, int level)
> {
> +
> + ASSERT(type >= BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
> switch (type) {
> case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(fs_info);
> case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(fs_info, level);
> case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(fs_info);
> case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(fs_info, level);
> - default:
> - /*
> - * This can't happen, the type is validated several times
> - * before we get here.
> - */
> - BUG();
> }
> + return ERR_PTR(-EUCLEAN);
> }
>
> static void free_workspace(int type, struct list_head *ws)
> {
> + ASSERT(type >= BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
> switch (type) {
> case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
> case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
> case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
> case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
> - default:
> - /*
> - * This can't happen, the type is validated several times
> - * before we get here.
> - */
> - BUG();
> }
> }
>
> @@ -792,7 +776,7 @@ struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, i
> struct workspace_manager *wsm = fs_info->compr_wsm[type];
> struct list_head *workspace;
> int cpus = num_online_cpus();
> - unsigned nofs_flag;
> + unsigned int nofs_flag;
> struct list_head *idle_ws;
> spinlock_t *ws_lock;
> atomic_t *total_ws;
> @@ -868,18 +852,14 @@ struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, i
>
> static struct list_head *get_workspace(struct btrfs_fs_info *fs_info, int type, int level)
> {
> + ASSERT(type >= BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
> switch (type) {
> case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(fs_info, type, level);
> case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(fs_info, level);
> case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(fs_info, type, level);
> case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(fs_info, level);
> - default:
> - /*
> - * This can't happen, the type is validated several times
> - * before we get here.
> - */
> - BUG();
> }
> + return ERR_PTR(-EUCLEAN);
> }
>
> /*
> @@ -919,17 +899,12 @@ void btrfs_put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_he
>
> static void put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_head *ws)
> {
> + ASSERT(type >= BTRFS_COMPRESS_NONE && type < BTRFS_NR_COMPRESS_TYPES);
> switch (type) {
> case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(fs_info, type, ws);
> case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(fs_info, type, ws);
> case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(fs_info, type, ws);
> case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(fs_info, ws);
> - default:
> - /*
> - * This can't happen, the type is validated several times
> - * before we get here.
> - */
> - BUG();
> }
> }
>
> @@ -1181,17 +1156,17 @@ static u64 file_offset_from_bvec(const struct bio_vec *bvec)
> * @buf: The decompressed data buffer
> * @buf_len: The decompressed data length
> * @decompressed: Number of bytes that are already decompressed inside the
> - * compressed extent
> + * compressed extent
> * @cb: The compressed extent descriptor
> * @orig_bio: The original bio that the caller wants to read for
> *
> * An easier to understand graph is like below:
> *
> - * |<- orig_bio ->| |<- orig_bio->|
> - * |<------- full decompressed extent ----->|
> - * |<----------- @cb range ---->|
> - * | |<-- @buf_len -->|
> - * |<--- @decompressed --->|
> + * |<- orig_bio ->| |<- orig_bio->|
> + * |<------- full decompressed extent ----->|
> + * |<----------- @cb range ---->|
> + * | |<-- @buf_len -->|
> + * |<--- @decompressed --->|
> *
> * Note that, @cb can be a subpage of the full decompressed extent, but
> * @cb->start always has the same as the orig_file_offset value of the full
> @@ -1313,7 +1288,8 @@ static u32 shannon_entropy(struct heuristic_ws *ws)
> #define RADIX_BASE 4U
> #define COUNTERS_SIZE (1U << RADIX_BASE)
>
> -static u8 get4bits(u64 num, int shift) {
> +static u8 get4bits(u64 num, int shift)
> +{
> u8 low4bits;
>
> num >>= shift;
> @@ -1388,7 +1364,7 @@ static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
> */
> memset(counters, 0, sizeof(counters));
>
> - for (i = 0; i < num; i ++) {
> + for (i = 0; i < num; i++) {
> buf_num = array_buf[i].count;
> addr = get4bits(buf_num, shift);
> counters[addr]++;
> --
> 2.53.0
>
>
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2026-03-01 11:35 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-02-28 9:06 [PATCH v2 0/2] replace BUG() and BUG_ON() with error handling Adarsh Das
2026-02-28 9:06 ` [PATCH v2 1/2] btrfs: replace BUG() with error handling in compression.c Adarsh Das
2026-02-28 20:36 ` Qu Wenruo
2026-03-01 11:34 ` Filipe Manana
2026-02-28 9:06 ` [PATCH v2 2/2] btrfs: replace BUG() and BUG_ON() with error handling in extent-tree.c Adarsh Das
2026-02-28 20:37 ` Qu Wenruo
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox