public inbox for linux-btrfs@vger.kernel.org
 help / color / mirror / Atom feed
From: Su Yue <Damenly_Su@gmx.com>
To: Naohiro Aota <naohiro.aota@wdc.com>,
	linux-btrfs@vger.kernel.org, David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>, Josef Bacik <josef@toxicpanda.com>,
	Nikolay Borisov <nborisov@suse.com>,
	Damien Le Moal <damien.lemoal@wdc.com>,
	Johannes Thumshirn <Johannes.Thumshirn@wdc.com>,
	Hannes Reinecke <hare@suse.com>,
	Anand Jain <anand.jain@oracle.com>,
	linux-fsdevel@vger.kernel.org
Subject: Re: [PATCH 12/20] btrfs: introduce clustered_alloc_info
Date: Thu, 6 Feb 2020 20:44:51 +0800	[thread overview]
Message-ID: <c235054d-49b1-28b5-0f3b-d7bc1cecd766@gmx.com> (raw)
In-Reply-To: <20200206104214.400857-13-naohiro.aota@wdc.com>

On 2020/2/6 6:42 PM, Naohiro Aota wrote:
> Introduce struct clustered_alloc_info to manage parameters related to
> clustered allocation. By separating clustered_alloc_info and
> find_free_extent_ctl, we can introduce other allocation policies. One can
> access per-allocation-policy private information from "alloc_info" of
> struct find_free_extent_ctl.
>
> Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
> ---
>   fs/btrfs/extent-tree.c | 99 +++++++++++++++++++++++++-----------------
>   1 file changed, 59 insertions(+), 40 deletions(-)
>
> diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
> index b1f52eee24fe..8124a6461043 100644
> --- a/fs/btrfs/extent-tree.c
> +++ b/fs/btrfs/extent-tree.c
> @@ -3456,9 +3456,6 @@ struct find_free_extent_ctl {
>   	/* Where to start the search inside the bg */
>   	u64 search_start;
>
> -	/* For clustered allocation */
> -	u64 empty_cluster;
> -
>   	bool have_caching_bg;
>   	bool orig_have_caching_bg;
>
> @@ -3470,18 +3467,6 @@ struct find_free_extent_ctl {
>   	 */
>   	int loop;
>
> -	/*
> -	 * Whether we're refilling a cluster, if true we need to re-search
> -	 * current block group but don't try to refill the cluster again.
> -	 */
> -	bool retry_clustered;
> -
> -	/*
> -	 * Whether we're updating free space cache, if true we need to re-search
> -	 * current block group but don't try updating free space cache again.
> -	 */
> -	bool retry_unclustered;
> -
>   	/* If current block group is cached */
>   	int cached;
>
> @@ -3499,8 +3484,28 @@ struct find_free_extent_ctl {
>
>   	/* Allocation policy */
>   	enum btrfs_extent_allocation_policy policy;
> +	void *alloc_info;
>   };
>
> +struct clustered_alloc_info {
> +	/* For clustered allocation */
> +	u64 empty_cluster;
> +
> +	/*
> +	 * Whether we're refilling a cluster, if true we need to re-search
> +	 * current block group but don't try to refill the cluster again.
> +	 */
> +	bool retry_clustered;
> +
> +	/*
> +	 * Whether we're updating free space cache, if true we need to re-search
> +	 * current block group but don't try updating free space cache again.
> +	 */
> +	bool retry_unclustered;
> +
> +	struct btrfs_free_cluster *last_ptr;
> +	bool use_cluster;
> +};
>
>   /*
>    * Helper function for find_free_extent().
> @@ -3516,6 +3521,7 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
>   		struct btrfs_block_group **cluster_bg_ret)
>   {
>   	struct btrfs_block_group *cluster_bg;
> +	struct clustered_alloc_info *clustered = ffe_ctl->alloc_info;
>   	u64 aligned_cluster;
>   	u64 offset;
>   	int ret;
> @@ -3572,7 +3578,7 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
>   	}
>
>   	aligned_cluster = max_t(u64,
> -			ffe_ctl->empty_cluster + ffe_ctl->empty_size,
> +			clustered->empty_cluster + ffe_ctl->empty_size,
>   			bg->full_stripe_len);
>   	ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start,
>   			ffe_ctl->num_bytes, aligned_cluster);
> @@ -3591,12 +3597,12 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
>   			return 0;
>   		}
>   	} else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
> -		   !ffe_ctl->retry_clustered) {
> +		   !clustered->retry_clustered) {
>   		spin_unlock(&last_ptr->refill_lock);
>
> -		ffe_ctl->retry_clustered = true;
> +		clustered->retry_clustered = true;
>   		btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
> -				ffe_ctl->empty_cluster + ffe_ctl->empty_size);
> +				clustered->empty_cluster + ffe_ctl->empty_size);
>   		return -EAGAIN;
>   	}
>   	/*
> @@ -3618,6 +3624,7 @@ static int find_free_extent_unclustered(struct btrfs_block_group *bg,
>   		struct btrfs_free_cluster *last_ptr,
>   		struct find_free_extent_ctl *ffe_ctl)
>   {
> +	struct clustered_alloc_info *clustered = ffe_ctl->alloc_info;
>   	u64 offset;
>
>   	/*
> @@ -3636,7 +3643,7 @@ static int find_free_extent_unclustered(struct btrfs_block_group *bg,
>   		free_space_ctl = bg->free_space_ctl;
>   		spin_lock(&free_space_ctl->tree_lock);
>   		if (free_space_ctl->free_space <
> -		    ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
> +		    ffe_ctl->num_bytes + clustered->empty_cluster +
>   		    ffe_ctl->empty_size) {
>   			ffe_ctl->total_free_space = max_t(u64,
>   					ffe_ctl->total_free_space,
> @@ -3660,11 +3667,11 @@ static int find_free_extent_unclustered(struct btrfs_block_group *bg,
>   	 * If @retry_unclustered is true then we've already waited on this
>   	 * block group once and should move on to the next block group.
>   	 */
> -	if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
> +	if (!offset && !clustered->retry_unclustered && !ffe_ctl->cached &&
>   	    ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
>   		btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
>   						      ffe_ctl->empty_size);
> -		ffe_ctl->retry_unclustered = true;
> +		clustered->retry_unclustered = true;
>   		return -EAGAIN;
>   	} else if (!offset) {
>   		return 1;
> @@ -3685,6 +3692,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
>   					bool full_search, bool use_cluster)
>   {
>   	struct btrfs_root *root = fs_info->extent_root;
> +	struct clustered_alloc_info *clustered = ffe_ctl->alloc_info;
>   	int ret;
>
>   	if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) &&
> @@ -3774,10 +3782,10 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
>   			 * no empty_cluster.
>   			 */
>   			if (ffe_ctl->empty_size == 0 &&
> -			    ffe_ctl->empty_cluster == 0)
> +			    clustered->empty_cluster == 0)
>   				return -ENOSPC;
>   			ffe_ctl->empty_size = 0;
> -			ffe_ctl->empty_cluster = 0;
> +			clustered->empty_cluster = 0;
>   		}
>   		return 1;
>   	}
> @@ -3816,11 +3824,10 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
>   {
>   	int ret = 0;
>   	int cache_block_group_error = 0;
> -	struct btrfs_free_cluster *last_ptr = NULL;
>   	struct btrfs_block_group *block_group = NULL;
>   	struct find_free_extent_ctl ffe_ctl = {0};
>   	struct btrfs_space_info *space_info;
> -	bool use_cluster = true;
> +	struct clustered_alloc_info *clustered = NULL;
>   	bool full_search = false;
>
>   	WARN_ON(num_bytes < fs_info->sectorsize);
> @@ -3829,8 +3836,6 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
>   	ffe_ctl.empty_size = empty_size;
>   	ffe_ctl.flags = flags;
>   	ffe_ctl.search_start = 0;
> -	ffe_ctl.retry_clustered = false;
> -	ffe_ctl.retry_unclustered = false;
>   	ffe_ctl.delalloc = delalloc;
>   	ffe_ctl.index = btrfs_bg_flags_to_raid_index(flags);
>   	ffe_ctl.have_caching_bg = false;
> @@ -3851,6 +3856,15 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
>   		return -ENOSPC;
>   	}
>
> +	clustered = kzalloc(sizeof(*clustered), GFP_NOFS);
> +	if (!clustered)
> +		return -ENOMEM;

Nit on coding style: please move the kzalloc call to after the whole
block of assignments.

> +	clustered->last_ptr = NULL;
> +	clustered->use_cluster = true;
> +	clustered->retry_clustered = false;
> +	clustered->retry_unclustered = false;
> +	ffe_ctl.alloc_info = clustered;
> +
>   	/*
>   	 * If our free space is heavily fragmented we may not be able to make
>   	 * big contiguous allocations, so instead of doing the expensive search
> @@ -3869,14 +3883,16 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
>   			spin_unlock(&space_info->lock);
>   			return -ENOSPC;
>   		} else if (space_info->max_extent_size) {
> -			use_cluster = false;
> +			clustered->use_cluster = false;
>   		}
>   		spin_unlock(&space_info->lock);
>   	}
>
> -	last_ptr = fetch_cluster_info(fs_info, space_info,
> -				      &ffe_ctl.empty_cluster);
> -	if (last_ptr) {
> +	clustered->last_ptr = fetch_cluster_info(fs_info, space_info,
> +						 &clustered->empty_cluster);
> +	if (clustered->last_ptr) {
> +		struct btrfs_free_cluster *last_ptr = clustered->last_ptr;
> +
>   		spin_lock(&last_ptr->lock);
>   		if (last_ptr->block_group)
>   			ffe_ctl.hint_byte = last_ptr->window_start;
> @@ -3887,7 +3903,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
>   			 * some time.
>   			 */
>   			ffe_ctl.hint_byte = last_ptr->window_start;
> -			use_cluster = false;
> +			clustered->use_cluster = false;
>   		}
>   		spin_unlock(&last_ptr->lock);
>   	}
> @@ -4000,10 +4016,11 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
>   		 * Ok we want to try and use the cluster allocator, so
>   		 * lets look there
>   		 */
> -		if (last_ptr && use_cluster) {
> +		if (clustered->last_ptr && clustered->use_cluster) {
>   			struct btrfs_block_group *cluster_bg = NULL;
>
> -			ret = find_free_extent_clustered(block_group, last_ptr,
> +			ret = find_free_extent_clustered(block_group,
> +							 clustered->last_ptr,
>   							 &ffe_ctl, &cluster_bg);
>
>   			if (ret == 0) {
> @@ -4021,7 +4038,8 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
>   			/* ret == -ENOENT case falls through */
>   		}
>
> -		ret = find_free_extent_unclustered(block_group, last_ptr,
> +		ret = find_free_extent_unclustered(block_group,
> +						   clustered->last_ptr,
>   						   &ffe_ctl);
>   		if (ret == -EAGAIN)
>   			goto have_block_group;
> @@ -4062,8 +4080,8 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
>   		btrfs_release_block_group(block_group, delalloc);
>   		break;
>   loop:
> -		ffe_ctl.retry_clustered = false;
> -		ffe_ctl.retry_unclustered = false;
> +		clustered->retry_clustered = false;
> +		clustered->retry_unclustered = false;
>   		BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
>   		       ffe_ctl.index);
>   		btrfs_release_block_group(block_group, delalloc);
> @@ -4071,8 +4089,9 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
>   	}
>   	up_read(&space_info->groups_sem);
>
> -	ret = find_free_extent_update_loop(fs_info, last_ptr, ins, &ffe_ctl,
> -					   full_search, use_cluster);
> +	ret = find_free_extent_update_loop(fs_info, clustered->last_ptr, ins,
> +					   &ffe_ctl, full_search,
> +					   clustered->use_cluster);
>   	if (ret > 0)
>   		goto search;
>
>


  reply	other threads:[~2020-02-06 12:45 UTC|newest]

Thread overview: 52+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-02-06 10:41 [PATCH 00/20] btrfs: refactor and generalize chunk/dev_extent/extent allocation Naohiro Aota
2020-02-06 10:41 ` [PATCH 01/20] btrfs: change type of full_search to bool Naohiro Aota
2020-02-06 11:26   ` Johannes Thumshirn
2020-02-06 16:03   ` Josef Bacik
2020-02-06 10:41 ` [PATCH 02/20] btrfs: introduce chunk allocation policy Naohiro Aota
2020-02-06 11:30   ` Johannes Thumshirn
2020-02-07  6:11     ` Naohiro Aota
2020-02-06 16:06   ` Josef Bacik
2020-02-06 16:07   ` Josef Bacik
2020-02-06 10:41 ` [PATCH 03/20] btrfs: refactor find_free_dev_extent_start() Naohiro Aota
2020-02-06 12:02   ` Johannes Thumshirn
2020-02-07  6:22     ` Naohiro Aota
2020-02-06 16:34   ` Josef Bacik
2020-02-06 10:41 ` [PATCH 04/20] btrfs: introduce alloc_chunk_ctl Naohiro Aota
2020-02-06 12:07   ` Johannes Thumshirn
2020-02-06 16:38   ` Josef Bacik
2020-02-07  7:08     ` Naohiro Aota
2020-02-06 10:41 ` [PATCH 05/20] btrfs: factor out set_parameters() Naohiro Aota
2020-02-06 13:51   ` Johannes Thumshirn
2020-02-06 16:40   ` Josef Bacik
2020-02-07  7:59     ` Naohiro Aota
2020-02-06 10:42 ` [PATCH 06/20] btrfs: factor out gather_device_info() Naohiro Aota
2020-02-06 15:43   ` Johannes Thumshirn
2020-02-07  9:54     ` Naohiro Aota
2020-02-06 16:44   ` Josef Bacik
2020-02-06 10:42 ` [PATCH 07/20] btrfs: factor out decide_stripe_size() Naohiro Aota
2020-02-06 15:59   ` Johannes Thumshirn
2020-02-06 16:47   ` Josef Bacik
2020-02-06 10:42 ` [PATCH 08/20] btrfs: factor out create_chunk() Naohiro Aota
2020-02-06 16:49   ` Josef Bacik
2020-02-07  9:17     ` Naohiro Aota
2020-02-06 10:42 ` [PATCH 09/20] btrfs: parameterize dev_extent_min Naohiro Aota
2020-02-06 16:52   ` Josef Bacik
2020-02-07  9:00     ` Naohiro Aota
2020-02-06 10:42 ` [PATCH 10/20] btrfs: introduce extent allocation policy Naohiro Aota
2020-02-06 10:42 ` [PATCH 11/20] btrfs: move hint_byte into find_free_extent_ctl Naohiro Aota
2020-02-06 10:42 ` [PATCH 12/20] btrfs: introduce clustered_alloc_info Naohiro Aota
2020-02-06 12:44   ` Su Yue [this message]
2020-02-07  9:25     ` Naohiro Aota
2020-02-06 17:01   ` Josef Bacik
2020-02-07  9:53     ` Naohiro Aota
2020-02-06 10:42 ` [PATCH 13/20] btrfs: factor out do_allocation() Naohiro Aota
2020-02-06 10:42 ` [PATCH 14/20] btrfs: drop unnecessary arguments from clustered allocation functions Naohiro Aota
2020-02-06 10:42 ` [PATCH 15/20] btrfs: factor out release_block_group() Naohiro Aota
2020-02-06 10:42 ` [PATCH 16/20] btrfs: factor out found_extent() Naohiro Aota
2020-02-06 10:42 ` [PATCH 17/20] btrfs: drop unnecessary arguments from find_free_extent_update_loop() Naohiro Aota
2020-02-06 10:42 ` [PATCH 18/20] btrfs: factor out chunk_allocation_failed() Naohiro Aota
2020-02-06 10:42 ` [PATCH 19/20] btrfs: skip LOOP_NO_EMPTY_SIZE if not clustered allocation Naohiro Aota
2020-02-06 10:42 ` [PATCH 20/20] btrfs: factor out prepare_allocation() Naohiro Aota
2020-02-06 11:43 ` [PATCH 00/20] btrfs: refactor and generalize chunk/dev_extent/extent allocation Martin Steigerwald
2020-02-07  6:06   ` Naohiro Aota
2020-02-07  8:02     ` Martin Steigerwald

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=c235054d-49b1-28b5-0f3b-d7bc1cecd766@gmx.com \
    --to=damenly_su@gmx.com \
    --cc=Johannes.Thumshirn@wdc.com \
    --cc=anand.jain@oracle.com \
    --cc=clm@fb.com \
    --cc=damien.lemoal@wdc.com \
    --cc=dsterba@suse.com \
    --cc=hare@suse.com \
    --cc=josef@toxicpanda.com \
    --cc=linux-btrfs@vger.kernel.org \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=naohiro.aota@wdc.com \
    --cc=nborisov@suse.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox