public inbox for linux-btrfs@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3 0/2] btrfs: fix periodic reclaim condition
@ 2026-01-13  4:07 Sun YangKai
  2026-01-13  4:07 ` [PATCH v3 1/2] " Sun YangKai
  2026-01-13  4:07 ` [PATCH v3 2/2] btrfs: consolidate reclaim readiness checks in btrfs_should_reclaim() Sun YangKai
  0 siblings, 2 replies; 6+ messages in thread
From: Sun YangKai @ 2026-01-13  4:07 UTC (permalink / raw)
  To: linux-btrfs; +Cc: Sun YangKai

This series eliminates wasteful periodic reclaim operations that occurred
when previous attempts had already failed to reclaim any space, and includes
several preparatory cleanups.

Patch 1 fixes the core issue.
Patch 2 is a trivial cleanup.

Changelog

v3:
- Fix the core issue with minimal changes, suggested by Boris Burkov <boris@bur.io>
- Drop some cleanups which might conflict with some other recent patches
  in the mailing list. I'll send them in a separate series.

Sun YangKai (2):
  btrfs: fix periodic reclaim condition
  btrfs: consolidate reclaim readiness checks in btrfs_should_reclaim()

 fs/btrfs/block-group.c | 18 +++++++++++-------
 fs/btrfs/space-info.c  | 21 ++++++++++++---------
 2 files changed, 23 insertions(+), 16 deletions(-)

-- 
2.52.0


^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH v3 1/2] btrfs: fix periodic reclaim condition
  2026-01-13  4:07 [PATCH v3 0/2] btrfs: fix periodic reclaim condition Sun YangKai
@ 2026-01-13  4:07 ` Sun YangKai
  2026-01-13 18:32   ` Boris Burkov
  2026-01-13  4:07 ` [PATCH v3 2/2] btrfs: consolidate reclaim readiness checks in btrfs_should_reclaim() Sun YangKai
  1 sibling, 1 reply; 6+ messages in thread
From: Sun YangKai @ 2026-01-13  4:07 UTC (permalink / raw)
  To: linux-btrfs; +Cc: Sun YangKai, Boris Burkov

Problems with current implementation:
1. reclaimable_bytes is signed while chunk_sz is unsigned, causing
   negative reclaimable_bytes to trigger reclaim unexpectedly
2. The "space must be freed between scans" assumption breaks the
   two-scan requirement: first scan marks block groups, second scan
   reclaims them. Without the second scan, no reclamation occurs.

Instead, track actual reclaim progress: pause periodic reclaim when block
groups are queued for reclaim, and resume it only once progress is made.
This ensures reclaim continues until no further progress can be made, and
periodic reclaim resumes when there is enough free space.

Suggested-by: Boris Burkov <boris@bur.io>
Fixes: 813d4c6422516 ("btrfs: prevent pathological periodic reclaim loops")
Signed-off-by: Sun YangKai <sunk67188@gmail.com>
---
 fs/btrfs/block-group.c |  6 +++++-
 fs/btrfs/space-info.c  | 21 ++++++++++++---------
 2 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index e417aba4c4c7..f0945a799aed 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1871,6 +1871,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 	while (!list_empty(&fs_info->reclaim_bgs)) {
 		u64 used;
 		u64 reserved;
+		u64 old_total;
 		int ret = 0;
 
 		bg = list_first_entry(&fs_info->reclaim_bgs,
@@ -1936,6 +1937,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 		}
 
 		spin_unlock(&bg->lock);
+		old_total = space_info->total_bytes;
 		spin_unlock(&space_info->lock);
 
 		/*
@@ -1989,13 +1991,15 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 			spin_lock(&space_info->lock);
 			space_info->reclaim_errors++;
 			if (READ_ONCE(space_info->periodic_reclaim))
-				space_info->periodic_reclaim_ready = false;
+				btrfs_set_periodic_reclaim_ready(space_info, false);
 			spin_unlock(&space_info->lock);
 		}
 		spin_lock(&space_info->lock);
 		space_info->reclaim_count++;
 		space_info->reclaim_bytes += used;
 		space_info->reclaim_bytes += reserved;
+		if (space_info->total_bytes < old_total)
+			btrfs_set_periodic_reclaim_ready(space_info, true);
 		spin_unlock(&space_info->lock);
 
 next:
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 7b7b7255f7d8..7d2386ea86c5 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -2079,11 +2079,11 @@ static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
 	return unalloc < data_chunk_size;
 }
 
-static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
+static bool do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
 {
 	struct btrfs_block_group *bg;
 	int thresh_pct;
-	bool try_again = true;
+	bool will_reclaim = false;
 	bool urgent;
 
 	spin_lock(&space_info->lock);
@@ -2101,7 +2101,7 @@ static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
 		spin_lock(&bg->lock);
 		thresh = mult_perc(bg->length, thresh_pct);
 		if (bg->used < thresh && bg->reclaim_mark) {
-			try_again = false;
+			will_reclaim = true;
 			reclaim = true;
 		}
 		bg->reclaim_mark++;
@@ -2118,12 +2118,13 @@ static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
 	 * If we have any staler groups, we don't touch the fresher ones, but if we
 	 * really need a block group, do take a fresh one.
 	 */
-	if (try_again && urgent) {
-		try_again = false;
+	if (!will_reclaim && urgent) {
+		urgent = false;
 		goto again;
 	}
 
 	up_read(&space_info->groups_sem);
+	return will_reclaim;
 }
 
 void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
@@ -2133,7 +2134,8 @@ void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s6
 	lockdep_assert_held(&space_info->lock);
 	space_info->reclaimable_bytes += bytes;
 
-	if (space_info->reclaimable_bytes >= chunk_sz)
+	if (space_info->reclaimable_bytes > 0 &&
+	    space_info->reclaimable_bytes >= chunk_sz)
 		btrfs_set_periodic_reclaim_ready(space_info, true);
 }
 
@@ -2160,7 +2162,6 @@ static bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
 
 	spin_lock(&space_info->lock);
 	ret = space_info->periodic_reclaim_ready;
-	btrfs_set_periodic_reclaim_ready(space_info, false);
 	spin_unlock(&space_info->lock);
 
 	return ret;
@@ -2174,8 +2175,10 @@ void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
 	list_for_each_entry(space_info, &fs_info->space_info, list) {
 		if (!btrfs_should_periodic_reclaim(space_info))
 			continue;
-		for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++)
-			do_reclaim_sweep(space_info, raid);
+		for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
+			if (do_reclaim_sweep(space_info, raid))
+				btrfs_set_periodic_reclaim_ready(space_info, false);
+		}
 	}
 }
 
-- 
2.52.0


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v3 2/2] btrfs: consolidate reclaim readiness checks in btrfs_should_reclaim()
  2026-01-13  4:07 [PATCH v3 0/2] btrfs: fix periodic reclaim condition Sun YangKai
  2026-01-13  4:07 ` [PATCH v3 1/2] " Sun YangKai
@ 2026-01-13  4:07 ` Sun YangKai
  2026-01-13 18:32   ` Boris Burkov
  1 sibling, 1 reply; 6+ messages in thread
From: Sun YangKai @ 2026-01-13  4:07 UTC (permalink / raw)
  To: linux-btrfs; +Cc: Sun YangKai

Move the filesystem state validation from btrfs_reclaim_bgs_work() into
btrfs_should_reclaim() to centralize the reclaim eligibility logic.
Since it is the only caller of btrfs_should_reclaim(), there's no
functional change.

Signed-off-by: Sun YangKai <sunk67188@gmail.com>
---
 fs/btrfs/block-group.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index f0945a799aed..9ca4db7eebfb 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1804,6 +1804,12 @@ static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
 
 static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info)
 {
+	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
+		return false;
+
+	if (btrfs_fs_closing(fs_info))
+		return false;
+
 	if (btrfs_is_zoned(fs_info))
 		return btrfs_zoned_should_reclaim(fs_info);
 	return true;
@@ -1838,12 +1844,6 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
 	struct btrfs_space_info *space_info;
 	LIST_HEAD(retry_list);
 
-	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
-		return;
-
-	if (btrfs_fs_closing(fs_info))
-		return;
-
 	if (!btrfs_should_reclaim(fs_info))
 		return;
 
-- 
2.52.0


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 1/2] btrfs: fix periodic reclaim condition
  2026-01-13  4:07 ` [PATCH v3 1/2] " Sun YangKai
@ 2026-01-13 18:32   ` Boris Burkov
  2026-01-14  3:44     ` Sun Yangkai
  0 siblings, 1 reply; 6+ messages in thread
From: Boris Burkov @ 2026-01-13 18:32 UTC (permalink / raw)
  To: Sun YangKai; +Cc: linux-btrfs

On Tue, Jan 13, 2026 at 12:07:04PM +0800, Sun YangKai wrote:
> Problems with current implementation:
> 1. reclaimable_bytes is signed while chunk_sz is unsigned, causing
>    negative reclaimable_bytes to trigger reclaim unexpectedly
> 2. The "space must be freed between scans" assumption breaks the
>    two-scan requirement: first scan marks block groups, second scan
>    reclaims them. Without the second scan, no reclamation occurs.
> 
> Instead, track actual reclaim progress: pause reclaim when block groups
> will be reclaimed, and resume only when progress is made. This ensures
> reclaim continues until no further progress can be made. And resume
> periodic reclaim when there's enough free space.
> 
> Suggested-by: Boris Burkov <boris@bur.io>
> Fixes: 813d4c6422516 ("btrfs: prevent pathological periodic reclaim loops")

Made a small inline suggestion, but you can add
Reviewed-by: Boris Burkov <boris@bur.io>

> Signed-off-by: Sun YangKai <sunk67188@gmail.com>
> ---
>  fs/btrfs/block-group.c |  6 +++++-
>  fs/btrfs/space-info.c  | 21 ++++++++++++---------
>  2 files changed, 17 insertions(+), 10 deletions(-)
> 
> diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
> index e417aba4c4c7..f0945a799aed 100644
> --- a/fs/btrfs/block-group.c
> +++ b/fs/btrfs/block-group.c
> @@ -1871,6 +1871,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
>  	while (!list_empty(&fs_info->reclaim_bgs)) {
>  		u64 used;
>  		u64 reserved;
> +		u64 old_total;
>  		int ret = 0;
>  
>  		bg = list_first_entry(&fs_info->reclaim_bgs,
> @@ -1936,6 +1937,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
>  		}
>  
>  		spin_unlock(&bg->lock);
> +		old_total = space_info->total_bytes;
>  		spin_unlock(&space_info->lock);
>  
>  		/*
> @@ -1989,13 +1991,15 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
>  			spin_lock(&space_info->lock);
>  			space_info->reclaim_errors++;
>  			if (READ_ONCE(space_info->periodic_reclaim))
> -				space_info->periodic_reclaim_ready = false;
> +				btrfs_set_periodic_reclaim_ready(space_info, false);

I think it probably makes more sense to remove this one entirely, since
it's already false from the sweep, then only set it true if we succeed
and the total_bytes goes down.

>  			spin_unlock(&space_info->lock);
>  		}
>  		spin_lock(&space_info->lock);
>  		space_info->reclaim_count++;
>  		space_info->reclaim_bytes += used;
>  		space_info->reclaim_bytes += reserved;
> +		if (space_info->total_bytes < old_total)
> +			btrfs_set_periodic_reclaim_ready(space_info, true);
>  		spin_unlock(&space_info->lock);
>  
>  next:
> diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
> index 7b7b7255f7d8..7d2386ea86c5 100644
> --- a/fs/btrfs/space-info.c
> +++ b/fs/btrfs/space-info.c
> @@ -2079,11 +2079,11 @@ static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
>  	return unalloc < data_chunk_size;
>  }
>  
> -static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
> +static bool do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
>  {
>  	struct btrfs_block_group *bg;
>  	int thresh_pct;
> -	bool try_again = true;
> +	bool will_reclaim = false;
>  	bool urgent;
>  
>  	spin_lock(&space_info->lock);
> @@ -2101,7 +2101,7 @@ static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
>  		spin_lock(&bg->lock);
>  		thresh = mult_perc(bg->length, thresh_pct);
>  		if (bg->used < thresh && bg->reclaim_mark) {
> -			try_again = false;
> +			will_reclaim = true;
>  			reclaim = true;
>  		}
>  		bg->reclaim_mark++;
> @@ -2118,12 +2118,13 @@ static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
>  	 * If we have any staler groups, we don't touch the fresher ones, but if we
>  	 * really need a block group, do take a fresh one.
>  	 */
> -	if (try_again && urgent) {
> -		try_again = false;
> +	if (!will_reclaim && urgent) {
> +		urgent = false;
>  		goto again;
>  	}
>  
>  	up_read(&space_info->groups_sem);
> +	return will_reclaim;
>  }
>  
>  void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
> @@ -2133,7 +2134,8 @@ void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s6
>  	lockdep_assert_held(&space_info->lock);
>  	space_info->reclaimable_bytes += bytes;
>  
> -	if (space_info->reclaimable_bytes >= chunk_sz)
> +	if (space_info->reclaimable_bytes > 0 &&
> +	    space_info->reclaimable_bytes >= chunk_sz)
>  		btrfs_set_periodic_reclaim_ready(space_info, true);
>  }
>  
> @@ -2160,7 +2162,6 @@ static bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
>  
>  	spin_lock(&space_info->lock);
>  	ret = space_info->periodic_reclaim_ready;
> -	btrfs_set_periodic_reclaim_ready(space_info, false);
>  	spin_unlock(&space_info->lock);
>  
>  	return ret;
> @@ -2174,8 +2175,10 @@ void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
>  	list_for_each_entry(space_info, &fs_info->space_info, list) {
>  		if (!btrfs_should_periodic_reclaim(space_info))
>  			continue;
> -		for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++)
> -			do_reclaim_sweep(space_info, raid);
> +		for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
> +			if (do_reclaim_sweep(space_info, raid))
> +				btrfs_set_periodic_reclaim_ready(space_info, false);
> +		}
>  	}
>  }
>  
> -- 
> 2.52.0
> 

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 2/2] btrfs: consolidate reclaim readiness checks in btrfs_should_reclaim()
  2026-01-13  4:07 ` [PATCH v3 2/2] btrfs: consolidate reclaim readiness checks in btrfs_should_reclaim() Sun YangKai
@ 2026-01-13 18:32   ` Boris Burkov
  0 siblings, 0 replies; 6+ messages in thread
From: Boris Burkov @ 2026-01-13 18:32 UTC (permalink / raw)
  To: Sun YangKai; +Cc: linux-btrfs

On Tue, Jan 13, 2026 at 12:07:05PM +0800, Sun YangKai wrote:
> Move the filesystem state validation from btrfs_reclaim_bgs_work() into
> btrfs_should_reclaim() to centralize the reclaim eligibility logic.
> Since it is the only caller of btrfs_should_reclaim(), there's no
> functional change.
> 

Reviewed-by: Boris Burkov <boris@bur.io>

> Signed-off-by: Sun YangKai <sunk67188@gmail.com>
> ---
>  fs/btrfs/block-group.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
> 
> diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
> index f0945a799aed..9ca4db7eebfb 100644
> --- a/fs/btrfs/block-group.c
> +++ b/fs/btrfs/block-group.c
> @@ -1804,6 +1804,12 @@ static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
>  
>  static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info)
>  {
> +	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
> +		return false;
> +
> +	if (btrfs_fs_closing(fs_info))
> +		return false;
> +
>  	if (btrfs_is_zoned(fs_info))
>  		return btrfs_zoned_should_reclaim(fs_info);
>  	return true;
> @@ -1838,12 +1844,6 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
>  	struct btrfs_space_info *space_info;
>  	LIST_HEAD(retry_list);
>  
> -	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
> -		return;
> -
> -	if (btrfs_fs_closing(fs_info))
> -		return;
> -
>  	if (!btrfs_should_reclaim(fs_info))
>  		return;
>  
> -- 
> 2.52.0
> 

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 1/2] btrfs: fix periodic reclaim condition
  2026-01-13 18:32   ` Boris Burkov
@ 2026-01-14  3:44     ` Sun Yangkai
  0 siblings, 0 replies; 6+ messages in thread
From: Sun Yangkai @ 2026-01-14  3:44 UTC (permalink / raw)
  To: Boris Burkov; +Cc: linux-btrfs



在 2026/1/14 02:32, Boris Burkov 写道:
> On Tue, Jan 13, 2026 at 12:07:04PM +0800, Sun YangKai wrote:
>> Problems with current implementation:
>> 1. reclaimable_bytes is signed while chunk_sz is unsigned, causing
>>    negative reclaimable_bytes to trigger reclaim unexpectedly
>> 2. The "space must be freed between scans" assumption breaks the
>>    two-scan requirement: first scan marks block groups, second scan
>>    reclaims them. Without the second scan, no reclamation occurs.
>>
>> Instead, track actual reclaim progress: pause reclaim when block groups
>> will be reclaimed, and resume only when progress is made. This ensures
>> reclaim continues until no further progress can be made. And resume
>> periodic reclaim when there's enough free space.
>>
>> Suggested-by: Boris Burkov <boris@bur.io>
>> Fixes: 813d4c6422516 ("btrfs: prevent pathological periodic reclaim loops")
> 
> Made a small inline suggestion, but you can add
> Reviewed-by: Boris Burkov <boris@bur.io>
> 
>> Signed-off-by: Sun YangKai <sunk67188@gmail.com>
>> ---
>>  fs/btrfs/block-group.c |  6 +++++-
>>  fs/btrfs/space-info.c  | 21 ++++++++++++---------
>>  2 files changed, 17 insertions(+), 10 deletions(-)
>>
>> diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
>> index e417aba4c4c7..f0945a799aed 100644
>> --- a/fs/btrfs/block-group.c
>> +++ b/fs/btrfs/block-group.c
>> @@ -1871,6 +1871,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
>>  	while (!list_empty(&fs_info->reclaim_bgs)) {
>>  		u64 used;
>>  		u64 reserved;
>> +		u64 old_total;
>>  		int ret = 0;
>>  
>>  		bg = list_first_entry(&fs_info->reclaim_bgs,
>> @@ -1936,6 +1937,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
>>  		}
>>  
>>  		spin_unlock(&bg->lock);
>> +		old_total = space_info->total_bytes;
>>  		spin_unlock(&space_info->lock);
>>  
>>  		/*
>> @@ -1989,13 +1991,15 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
>>  			spin_lock(&space_info->lock);
>>  			space_info->reclaim_errors++;
>>  			if (READ_ONCE(space_info->periodic_reclaim))
>> -				space_info->periodic_reclaim_ready = false;
>> +				btrfs_set_periodic_reclaim_ready(space_info, false);
> 
> I think it probably makes more sense to remove this one entirely, since
> it's already false from the sweep, then only set it true if we succeed
> and the total_bytes goes down.

Yes, that makes a lot of sense. Thanks a lot.

>>  			spin_unlock(&space_info->lock);
>>  		}
>>  		spin_lock(&space_info->lock);
>>  		space_info->reclaim_count++;
>>  		space_info->reclaim_bytes += used;
>>  		space_info->reclaim_bytes += reserved;
>> +		if (space_info->total_bytes < old_total)
>> +			btrfs_set_periodic_reclaim_ready(space_info, true);
>>  		spin_unlock(&space_info->lock);
>>  
>>  next:
>> diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
>> index 7b7b7255f7d8..7d2386ea86c5 100644
>> --- a/fs/btrfs/space-info.c
>> +++ b/fs/btrfs/space-info.c
>> @@ -2079,11 +2079,11 @@ static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
>>  	return unalloc < data_chunk_size;
>>  }
>>  
>> -static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
>> +static bool do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
>>  {
>>  	struct btrfs_block_group *bg;
>>  	int thresh_pct;
>> -	bool try_again = true;
>> +	bool will_reclaim = false;
>>  	bool urgent;
>>  
>>  	spin_lock(&space_info->lock);
>> @@ -2101,7 +2101,7 @@ static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
>>  		spin_lock(&bg->lock);
>>  		thresh = mult_perc(bg->length, thresh_pct);
>>  		if (bg->used < thresh && bg->reclaim_mark) {
>> -			try_again = false;
>> +			will_reclaim = true;
>>  			reclaim = true;
>>  		}
>>  		bg->reclaim_mark++;
>> @@ -2118,12 +2118,13 @@ static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
>>  	 * If we have any staler groups, we don't touch the fresher ones, but if we
>>  	 * really need a block group, do take a fresh one.
>>  	 */
>> -	if (try_again && urgent) {
>> -		try_again = false;
>> +	if (!will_reclaim && urgent) {
>> +		urgent = false;
>>  		goto again;
>>  	}
>>  
>>  	up_read(&space_info->groups_sem);
>> +	return will_reclaim;
>>  }
>>  
>>  void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
>> @@ -2133,7 +2134,8 @@ void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s6
>>  	lockdep_assert_held(&space_info->lock);
>>  	space_info->reclaimable_bytes += bytes;
>>  
>> -	if (space_info->reclaimable_bytes >= chunk_sz)
>> +	if (space_info->reclaimable_bytes > 0 &&
>> +	    space_info->reclaimable_bytes >= chunk_sz)
>>  		btrfs_set_periodic_reclaim_ready(space_info, true);
>>  }
>>  
>> @@ -2160,7 +2162,6 @@ static bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
>>  
>>  	spin_lock(&space_info->lock);
>>  	ret = space_info->periodic_reclaim_ready;
>> -	btrfs_set_periodic_reclaim_ready(space_info, false);
>>  	spin_unlock(&space_info->lock);
>>  
>>  	return ret;
>> @@ -2174,8 +2175,10 @@ void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
>>  	list_for_each_entry(space_info, &fs_info->space_info, list) {
>>  		if (!btrfs_should_periodic_reclaim(space_info))
>>  			continue;
>> -		for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++)
>> -			do_reclaim_sweep(space_info, raid);
>> +		for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
>> +			if (do_reclaim_sweep(space_info, raid))
>> +				btrfs_set_periodic_reclaim_ready(space_info, false);
>> +		}
>>  	}
>>  }
>>  
>> -- 
>> 2.52.0
>>


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2026-01-14  3:44 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-01-13  4:07 [PATCH v3 0/2] btrfs: fix periodic reclaim condition Sun YangKai
2026-01-13  4:07 ` [PATCH v3 1/2] " Sun YangKai
2026-01-13 18:32   ` Boris Burkov
2026-01-14  3:44     ` Sun Yangkai
2026-01-13  4:07 ` [PATCH v3 2/2] btrfs: consolidate reclaim readiness checks in btrfs_should_reclaim() Sun YangKai
2026-01-13 18:32   ` Boris Burkov

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox