Linux Btrfs filesystem development
 help / color / mirror / Atom feed
From: Qu Wenruo <quwenruo.btrfs@gmx.com>
To: Gladyshev Ilya <foxido@foxido.dev>
Cc: Chris Mason <clm@fb.com>, David Sterba <dsterba@suse.com>,
	linux-btrfs@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: Re: [RFC PATCH 7/8] btrfs: simplify return path via cleanup.h
Date: Thu, 13 Nov 2025 07:20:01 +1030	[thread overview]
Message-ID: <0a31cf84-ccfe-4da4-b922-85da570c6e3b@gmx.com> (raw)
In-Reply-To: <5bfb05436ed8f80bc060a745805d54309cdba59c.1762972845.git.foxido@foxido.dev>



在 2025/11/13 05:19, Gladyshev Ilya 写道:
> In some functions, removing cleanup paths allows us to simplify the
> overall code, so replace the cleanup paths with guard()s.
> 
> Signed-off-by: Gladyshev Ilya <foxido@foxido.dev>
> ---
>   fs/btrfs/extent-io-tree.c | 21 ++++-----
>   fs/btrfs/extent-tree.c    | 96 ++++++++++++++++-----------------------
>   fs/btrfs/ordered-data.c   | 46 ++++++++-----------
>   3 files changed, 68 insertions(+), 95 deletions(-)
> 
> diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
> index 69ea2bd359a6..88d7aed7055f 100644
> --- a/fs/btrfs/extent-io-tree.c
> +++ b/fs/btrfs/extent-io-tree.c
> @@ -890,9 +890,8 @@ bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start,
>   				 struct extent_state **cached_state)
>   {
>   	struct extent_state *state;
> -	bool ret = false;
>   
> -	spin_lock(&tree->lock);
> +	guard(spinlock)(&tree->lock);
>   	if (cached_state && *cached_state) {
>   		state = *cached_state;
>   		if (state->end == start - 1 && extent_state_in_tree(state)) {
> @@ -911,23 +910,21 @@ bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start,
>   			*cached_state = NULL;
>   			if (state)
>   				goto got_it;
> -			goto out;
> +			return false;
>   		}
>   		btrfs_free_extent_state(*cached_state);
>   		*cached_state = NULL;
>   	}
>   
>   	state = find_first_extent_bit_state(tree, start, bits);
> +	if (!state)
> +		return false;
> +
>   got_it:
> -	if (state) {
> -		cache_state_if_flags(state, cached_state, 0);
> -		*start_ret = state->start;
> -		*end_ret = state->end;
> -		ret = true;
> -	}
> -out:
> -	spin_unlock(&tree->lock);
> -	return ret;
> +	cache_state_if_flags(state, cached_state, 0);
> +	*start_ret = state->start;
> +	*end_ret = state->end;
> +	return true;
>   }
>   
>   /*
> diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
> index f9744e456c6c..cb3d61d96e66 100644
> --- a/fs/btrfs/extent-tree.c
> +++ b/fs/btrfs/extent-tree.c
> @@ -1878,16 +1878,14 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
>   	 * and then re-check to make sure nobody got added.
>   	 */
>   	spin_unlock(&head->lock);
> -	spin_lock(&delayed_refs->lock);
> -	spin_lock(&head->lock);
> -	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
> -		spin_unlock(&head->lock);
> -		spin_unlock(&delayed_refs->lock);
> -		return 1;
> +	{

There has been some internal discussion about such anonymous code block usage.

Although I support such usage, especially when it can reduce the 
lifespan of local variables, it's not a commonly accepted pattern yet.

Thanks,
Qu

> +		guard(spinlock)(&delayed_refs->lock);
> +		guard(spinlock)(&head->lock);
> +
> +		if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op)
> +			return 1;
> +		btrfs_delete_ref_head(fs_info, delayed_refs, head);
>   	}
> -	btrfs_delete_ref_head(fs_info, delayed_refs, head);
> -	spin_unlock(&head->lock);
> -	spin_unlock(&delayed_refs->lock);
>   
>   	if (head->must_insert_reserved) {
>   		btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
> @@ -3391,30 +3389,29 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
>   	int ret = 0;
>   
>   	delayed_refs = &trans->transaction->delayed_refs;
> -	spin_lock(&delayed_refs->lock);
> -	head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
> -	if (!head)
> -		goto out_delayed_unlock;
> -
> -	spin_lock(&head->lock);
> -	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
> -		goto out;
> +	{
> +		guard(spinlock)(&delayed_refs->lock);
> +		head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
> +		if (!head)
> +			return 0;
>   
> -	if (cleanup_extent_op(head) != NULL)
> -		goto out;
> +		guard(spinlock)(&head->lock);
> +		if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
> +			return 0;
>   
> -	/*
> -	 * waiting for the lock here would deadlock.  If someone else has it
> -	 * locked they are already in the process of dropping it anyway
> -	 */
> -	if (!mutex_trylock(&head->mutex))
> -		goto out;
> +		if (cleanup_extent_op(head) != NULL)
> +			return 0;
>   
> -	btrfs_delete_ref_head(fs_info, delayed_refs, head);
> -	head->processing = false;
> +		/*
> +		 * waiting for the lock here would deadlock.  If someone else has it
> +		 * locked they are already in the process of dropping it anyway
> +		 */
> +		if (!mutex_trylock(&head->mutex))
> +			return 0;
>   
> -	spin_unlock(&head->lock);
> -	spin_unlock(&delayed_refs->lock);
> +		btrfs_delete_ref_head(fs_info, delayed_refs, head);
> +		head->processing = false;
> +	}
>   
>   	BUG_ON(head->extent_op);
>   	if (head->must_insert_reserved)
> @@ -3424,12 +3421,6 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
>   	mutex_unlock(&head->mutex);
>   	btrfs_put_delayed_ref_head(head);
>   	return ret;
> -out:
> -	spin_unlock(&head->lock);
> -
> -out_delayed_unlock:
> -	spin_unlock(&delayed_refs->lock);
> -	return 0;
>   }
>   
>   int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
> @@ -3910,13 +3901,13 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
>   		 */
>   	}
>   
> -	spin_lock(&space_info->lock);
> -	spin_lock(&block_group->lock);
> -	spin_lock(&fs_info->treelog_bg_lock);
> -	spin_lock(&fs_info->relocation_bg_lock);
> +	guard(spinlock)(&space_info->lock);
> +	guard(spinlock)(&block_group->lock);
> +	guard(spinlock)(&fs_info->treelog_bg_lock);
> +	guard(spinlock)(&fs_info->relocation_bg_lock);
>   
>   	if (ret)
> -		goto out;
> +		goto err;
>   
>   	ASSERT(!ffe_ctl->for_treelog ||
>   	       block_group->start == fs_info->treelog_bg ||
> @@ -3928,8 +3919,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
>   	if (block_group->ro ||
>   	    (!ffe_ctl->for_data_reloc &&
>   	     test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) {
> -		ret = 1;
> -		goto out;
> +		goto err;
>   	}
>   
>   	/*
> @@ -3938,8 +3928,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
>   	 */
>   	if (ffe_ctl->for_treelog && !fs_info->treelog_bg &&
>   	    (block_group->used || block_group->reserved)) {
> -		ret = 1;
> -		goto out;
> +		goto err;
>   	}
>   
>   	/*
> @@ -3948,8 +3937,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
>   	 */
>   	if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg &&
>   	    (block_group->used || block_group->reserved)) {
> -		ret = 1;
> -		goto out;
> +		goto err;
>   	}
>   
>   	WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity);
> @@ -3963,8 +3951,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
>   			ffe_ctl->max_extent_size = avail;
>   			ffe_ctl->total_free_space = avail;
>   		}
> -		ret = 1;
> -		goto out;
> +		goto err;
>   	}
>   
>   	if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
> @@ -4003,17 +3990,14 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
>   	 */
>   
>   	ffe_ctl->search_start = ffe_ctl->found_offset;
> +	return 0;
>   
> -out:
> -	if (ret && ffe_ctl->for_treelog)
> +err:
> +	if (ffe_ctl->for_treelog)
>   		fs_info->treelog_bg = 0;
> -	if (ret && ffe_ctl->for_data_reloc)
> +	if (ffe_ctl->for_data_reloc)
>   		fs_info->data_reloc_bg = 0;
> -	spin_unlock(&fs_info->relocation_bg_lock);
> -	spin_unlock(&fs_info->treelog_bg_lock);
> -	spin_unlock(&block_group->lock);
> -	spin_unlock(&space_info->lock);
> -	return ret;
> +	return 1;
>   }
>   
>   static int do_allocation(struct btrfs_block_group *block_group,
> diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
> index 451b60de4550..4dbec4ef4ffd 100644
> --- a/fs/btrfs/ordered-data.c
> +++ b/fs/btrfs/ordered-data.c
> @@ -995,35 +995,29 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
>   	struct rb_node *node;
>   	struct btrfs_ordered_extent *entry = NULL;
>   
> -	spin_lock_irq(&inode->ordered_tree_lock);
> +	guard(spinlock_irq)(&inode->ordered_tree_lock);
>   	node = ordered_tree_search(inode, file_offset);
>   	if (!node) {
>   		node = ordered_tree_search(inode, file_offset + len);
>   		if (!node)
> -			goto out;
> +			return NULL;
>   	}
>   
>   	while (1) {
>   		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
> -		if (btrfs_range_overlaps(entry, file_offset, len))
> -			break;
> +		if (btrfs_range_overlaps(entry, file_offset, len)) {
> +			refcount_inc(&entry->refs);
> +			trace_btrfs_ordered_extent_lookup_range(inode, entry);
> +			return entry;
> +		}
>   
>   		if (entry->file_offset >= file_offset + len) {
> -			entry = NULL;
> -			break;
> +			return NULL;
>   		}
> -		entry = NULL;
>   		node = rb_next(node);
>   		if (!node)
> -			break;
> +			return NULL;
>   	}
> -out:
> -	if (entry) {
> -		refcount_inc(&entry->refs);
> -		trace_btrfs_ordered_extent_lookup_range(inode, entry);
> -	}
> -	spin_unlock_irq(&inode->ordered_tree_lock);
> -	return entry;
>   }
>   
>   /*
> @@ -1092,7 +1086,7 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
>   	struct rb_node *next;
>   	struct btrfs_ordered_extent *entry = NULL;
>   
> -	spin_lock_irq(&inode->ordered_tree_lock);
> +	guard(spinlock_irq)(&inode->ordered_tree_lock);
>   	node = inode->ordered_tree.rb_node;
>   	/*
>   	 * Here we don't want to use tree_search() which will use tree->last
> @@ -1112,12 +1106,12 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
>   			 * Direct hit, got an ordered extent that starts at
>   			 * @file_offset
>   			 */
> -			goto out;
> +			goto ret_entry;
>   		}
>   	}
>   	if (!entry) {
>   		/* Empty tree */
> -		goto out;
> +		return NULL;
>   	}
>   
>   	cur = &entry->rb_node;
> @@ -1132,22 +1126,20 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
>   	if (prev) {
>   		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
>   		if (btrfs_range_overlaps(entry, file_offset, len))
> -			goto out;
> +			goto ret_entry;
>   	}
>   	if (next) {
>   		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
>   		if (btrfs_range_overlaps(entry, file_offset, len))
> -			goto out;
> +			goto ret_entry;
>   	}
>   	/* No ordered extent in the range */
> -	entry = NULL;
> -out:
> -	if (entry) {
> -		refcount_inc(&entry->refs);
> -		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
> -	}
> +	return NULL;
> +
> +ret_entry:
> +	refcount_inc(&entry->refs);
> +	trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
>   
> -	spin_unlock_irq(&inode->ordered_tree_lock);
>   	return entry;
>   }
>   


  reply	other threads:[~2025-11-12 20:50 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-11-12 18:49 [RFC PATCH 0/8] use cleanup.h in btrfs Gladyshev Ilya
2025-11-12 18:49 ` [RFC PATCH 1/8] btrfs: remove redundant label in __del_qgroup_relation Gladyshev Ilya
2025-11-12 18:49 ` [RFC PATCH 2/8] btrfs: move kfree out of btrfs_create_qgroup's cleanup path Gladyshev Ilya
2025-11-12 20:30   ` Qu Wenruo
2025-11-12 18:49 ` [RFC PATCH 3/8] btrfs: simplify control flow in scrub_simple_mirror Gladyshev Ilya
2025-11-12 18:49 ` [RFC PATCH 4/8] btrfs: simplify function protections with guards Gladyshev Ilya
2025-11-13  8:43   ` David Sterba
2025-11-13 10:06     ` Gladyshev Ilya
2025-11-13 11:25       ` David Sterba
2025-11-13 12:30         ` Daniel Vacek
2025-11-12 18:49 ` [RFC PATCH 5/8] btrfs: use cleanup.h guard()s to simplify unlocks on return Gladyshev Ilya
2025-11-12 18:49 ` [RFC PATCH 6/8] btrfs: simplify cleanup via scoped_guard() Gladyshev Ilya
2025-11-13  8:48   ` David Sterba
2025-11-12 18:49 ` [RFC PATCH 7/8] btrfs: simplify return path via cleanup.h Gladyshev Ilya
2025-11-12 20:50   ` Qu Wenruo [this message]
2025-11-13  8:54     ` David Sterba
2025-11-13 12:48       ` Daniel Vacek
2025-11-12 18:49 ` [RFC PATCH 8/8] btrfs: simplify cleanup in btrfs_add_qgroup_relation Gladyshev Ilya
2025-11-12 20:46   ` Qu Wenruo
2025-11-12 20:55 ` [RFC PATCH 0/8] use cleanup.h in btrfs Qu Wenruo
2025-11-13  8:01   ` Gladyshev Ilya

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=0a31cf84-ccfe-4da4-b922-85da570c6e3b@gmx.com \
    --to=quwenruo.btrfs@gmx.com \
    --cc=clm@fb.com \
    --cc=dsterba@suse.com \
    --cc=foxido@foxido.dev \
    --cc=linux-btrfs@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox