* zone GC ringbuffer fix and cleanup
@ 2026-01-27 15:10 Christoph Hellwig
2026-01-27 15:10 ` [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer Christoph Hellwig
` (2 more replies)
0 siblings, 3 replies; 11+ messages in thread
From: Christoph Hellwig @ 2026-01-27 15:10 UTC (permalink / raw)
To: Carlos Maiolino
Cc: Hans Holmberg, Darrick J. Wong, Chris Mason, Keith Busch,
linux-xfs
Hi all,
the first patch fixes the empty vs full condition detection in the ring
buffer as noted by Chris. Keith had an alternative version that uses
overflowing unsigned integer arithmetic, but just having an additional
counter as in this version feels even simpler. The second one is a
trivial cleanup in the same area.
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer
2026-01-27 15:10 zone GC ringbuffer fix and cleanup Christoph Hellwig
@ 2026-01-27 15:10 ` Christoph Hellwig
2026-01-28 5:35 ` Darrick J. Wong
` (3 more replies)
2026-01-27 15:10 ` [PATCH 2/2] xfs: remove xfs_zone_gc_space_available Christoph Hellwig
2026-01-27 15:25 ` zone GC ringbuffer fix and cleanup Keith Busch
2 siblings, 4 replies; 11+ messages in thread
From: Christoph Hellwig @ 2026-01-27 15:10 UTC (permalink / raw)
To: Carlos Maiolino
Cc: Hans Holmberg, Darrick J. Wong, Chris Mason, Keith Busch,
linux-xfs
When scratch_head wraps back to 0 and scratch_tail is also 0 because no
I/O has completed yet, the ring buffer could be mistaken for empty.
Fix this by introducing a separate scratch_available member in
struct xfs_zone_gc_data. This actually ends up simplifying the code as
well.
Reported-by: Chris Mason <clm@meta.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
fs/xfs/xfs_zone_gc.c | 25 +++++++++----------------
1 file changed, 9 insertions(+), 16 deletions(-)
diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
index dfa6653210c7..8c08e5519bff 100644
--- a/fs/xfs/xfs_zone_gc.c
+++ b/fs/xfs/xfs_zone_gc.c
@@ -131,10 +131,13 @@ struct xfs_zone_gc_data {
/*
* Scratchpad to buffer GC data, organized as a ring buffer over
* discontiguous folios. scratch_head is where the buffer is filled,
- * and scratch_tail tracks the buffer space freed.
+ * scratch_tail tracks the buffer space freed, and scratch_available
+ * counts the space available in the ring buffer between the head and
+ * the tail.
*/
struct folio *scratch_folios[XFS_GC_NR_BUFS];
unsigned int scratch_size;
+ unsigned int scratch_available;
unsigned int scratch_head;
unsigned int scratch_tail;
@@ -212,6 +215,7 @@ xfs_zone_gc_data_alloc(
goto out_free_scratch;
}
data->scratch_size = XFS_GC_BUF_SIZE * XFS_GC_NR_BUFS;
+ data->scratch_available = data->scratch_size;
INIT_LIST_HEAD(&data->reading);
INIT_LIST_HEAD(&data->writing);
INIT_LIST_HEAD(&data->resetting);
@@ -574,18 +578,6 @@ xfs_zone_gc_ensure_target(
return oz;
}
-static unsigned int
-xfs_zone_gc_scratch_available(
- struct xfs_zone_gc_data *data)
-{
- if (!data->scratch_tail)
- return data->scratch_size - data->scratch_head;
-
- if (!data->scratch_head)
- return data->scratch_tail;
- return (data->scratch_size - data->scratch_head) + data->scratch_tail;
-}
-
static bool
xfs_zone_gc_space_available(
struct xfs_zone_gc_data *data)
@@ -596,7 +588,7 @@ xfs_zone_gc_space_available(
if (!oz)
return false;
return oz->oz_allocated < rtg_blocks(oz->oz_rtg) &&
- xfs_zone_gc_scratch_available(data);
+ data->scratch_available;
}
static void
@@ -625,8 +617,7 @@ xfs_zone_gc_alloc_blocks(
if (!oz)
return NULL;
- *count_fsb = min(*count_fsb,
- XFS_B_TO_FSB(mp, xfs_zone_gc_scratch_available(data)));
+ *count_fsb = min(*count_fsb, XFS_B_TO_FSB(mp, data->scratch_available));
/*
* Directly allocate GC blocks from the reserved pool.
@@ -730,6 +721,7 @@ xfs_zone_gc_start_chunk(
bio->bi_end_io = xfs_zone_gc_end_io;
xfs_zone_gc_add_data(chunk);
data->scratch_head = (data->scratch_head + len) % data->scratch_size;
+ data->scratch_available -= len;
WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
list_add_tail(&chunk->entry, &data->reading);
@@ -862,6 +854,7 @@ xfs_zone_gc_finish_chunk(
data->scratch_tail =
(data->scratch_tail + chunk->len) % data->scratch_size;
+ data->scratch_available += chunk->len;
/*
* Cycle through the iolock and wait for direct I/O and layouts to
--
2.47.3
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH 2/2] xfs: remove xfs_zone_gc_space_available
2026-01-27 15:10 zone GC ringbuffer fix and cleanup Christoph Hellwig
2026-01-27 15:10 ` [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer Christoph Hellwig
@ 2026-01-27 15:10 ` Christoph Hellwig
2026-01-28 12:02 ` Hans Holmberg
2026-01-27 15:25 ` zone GC ringbuffer fix and cleanup Keith Busch
2 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2026-01-27 15:10 UTC (permalink / raw)
To: Carlos Maiolino
Cc: Hans Holmberg, Darrick J. Wong, Chris Mason, Keith Busch,
linux-xfs
xfs_zone_gc_space_available only has one caller left, so fold it into
that. Reorder the checks so that the cheaper scratch_available check
is done first.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
fs/xfs/xfs_zone_gc.c | 21 +++++++--------------
1 file changed, 7 insertions(+), 14 deletions(-)
diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
index 8c08e5519bff..7bdc5043cc1a 100644
--- a/fs/xfs/xfs_zone_gc.c
+++ b/fs/xfs/xfs_zone_gc.c
@@ -578,19 +578,6 @@ xfs_zone_gc_ensure_target(
return oz;
}
-static bool
-xfs_zone_gc_space_available(
- struct xfs_zone_gc_data *data)
-{
- struct xfs_open_zone *oz;
-
- oz = xfs_zone_gc_ensure_target(data->mp);
- if (!oz)
- return false;
- return oz->oz_allocated < rtg_blocks(oz->oz_rtg) &&
- data->scratch_available;
-}
-
static void
xfs_zone_gc_end_io(
struct bio *bio)
@@ -989,9 +976,15 @@ static bool
xfs_zone_gc_should_start_new_work(
struct xfs_zone_gc_data *data)
{
+ struct xfs_open_zone *oz;
+
if (xfs_is_shutdown(data->mp))
return false;
- if (!xfs_zone_gc_space_available(data))
+ if (!data->scratch_available)
+ return false;
+
+ oz = xfs_zone_gc_ensure_target(data->mp);
+ if (!oz || oz->oz_allocated == rtg_blocks(oz->oz_rtg))
return false;
if (!data->iter.victim_rtg) {
--
2.47.3
^ permalink raw reply related [flat|nested] 11+ messages in thread
* Re: zone GC ringbuffer fix and cleanup
2026-01-27 15:10 zone GC ringbuffer fix and cleanup Christoph Hellwig
2026-01-27 15:10 ` [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer Christoph Hellwig
2026-01-27 15:10 ` [PATCH 2/2] xfs: remove xfs_zone_gc_space_available Christoph Hellwig
@ 2026-01-27 15:25 ` Keith Busch
2 siblings, 0 replies; 11+ messages in thread
From: Keith Busch @ 2026-01-27 15:25 UTC (permalink / raw)
To: Christoph Hellwig
Cc: Carlos Maiolino, Hans Holmberg, Darrick J. Wong, Chris Mason,
linux-xfs
On Tue, Jan 27, 2026 at 04:10:19PM +0100, Christoph Hellwig wrote:
> the first patch fixes the empty vs full condition detection in the ring
> buffer as noted by Chris. Keith had an alternative version that uses
> overflowing unsigned integer arithmetic, but just having an additional
> counter as in this version feels even simpler. The second one is a
> trivial cleanup in the same area.
I checked out these patches earlier directly from your tree, and these
look good to me.
Reviewed-by: Keith Busch <kbusch@kernel.org>
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer
2026-01-27 15:10 ` [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer Christoph Hellwig
@ 2026-01-28 5:35 ` Darrick J. Wong
2026-01-28 5:52 ` Christoph Hellwig
2026-01-28 5:56 ` Christoph Hellwig
` (2 subsequent siblings)
3 siblings, 1 reply; 11+ messages in thread
From: Darrick J. Wong @ 2026-01-28 5:35 UTC (permalink / raw)
To: Christoph Hellwig
Cc: Carlos Maiolino, Hans Holmberg, Chris Mason, Keith Busch,
linux-xfs
On Tue, Jan 27, 2026 at 04:10:20PM +0100, Christoph Hellwig wrote:
> When scratch_head wraps back to 0 and scratch_tail is also 0 because no
> I/O has completed yet, the ring buffer could be mistaken for empty.
>
> Fix this by introducing a separate scratch_available member in
> struct xfs_zone_gc_data. This actually ends up simplifying the code as
> well.
>
> Reported-by: Chris Mason <clm@meta.com>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> fs/xfs/xfs_zone_gc.c | 25 +++++++++----------------
> 1 file changed, 9 insertions(+), 16 deletions(-)
>
> diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
> index dfa6653210c7..8c08e5519bff 100644
> --- a/fs/xfs/xfs_zone_gc.c
> +++ b/fs/xfs/xfs_zone_gc.c
> @@ -131,10 +131,13 @@ struct xfs_zone_gc_data {
> /*
> * Scratchpad to buffer GC data, organized as a ring buffer over
> * discontiguous folios. scratch_head is where the buffer is filled,
> - * and scratch_tail tracks the buffer space freed.
> + * scratch_tail tracks the buffer space freed, and scratch_available
> + * counts the space available in the ring buffer between the head and
> + * the tail.
> */
> struct folio *scratch_folios[XFS_GC_NR_BUFS];
> unsigned int scratch_size;
> + unsigned int scratch_available;
> unsigned int scratch_head;
> unsigned int scratch_tail;
Hrm. I did some digging (because clearly I'm not that good at
ringbuffer) and came up with this gem from akpm:
"A circular buffer implementation needs only head and tail indices.
`size' above appears to be redundant.
"Implementation-wise, the head and tail indices should *not* be
constrained to be less than the size of the buffer. They should be
allowed to wrap all the way back to zero. This allows you to distinguish
between the completely-empty and completely-full states while using 100%
of the storage."
https://lkml.iu.edu/hypermail/linux/kernel/0409.1/2709.html
Can that apply here?
--D
>
> @@ -212,6 +215,7 @@ xfs_zone_gc_data_alloc(
> goto out_free_scratch;
> }
> data->scratch_size = XFS_GC_BUF_SIZE * XFS_GC_NR_BUFS;
> + data->scratch_available = data->scratch_size;
> INIT_LIST_HEAD(&data->reading);
> INIT_LIST_HEAD(&data->writing);
> INIT_LIST_HEAD(&data->resetting);
> @@ -574,18 +578,6 @@ xfs_zone_gc_ensure_target(
> return oz;
> }
>
> -static unsigned int
> -xfs_zone_gc_scratch_available(
> - struct xfs_zone_gc_data *data)
> -{
> - if (!data->scratch_tail)
> - return data->scratch_size - data->scratch_head;
> -
> - if (!data->scratch_head)
> - return data->scratch_tail;
> - return (data->scratch_size - data->scratch_head) + data->scratch_tail;
> -}
> -
> static bool
> xfs_zone_gc_space_available(
> struct xfs_zone_gc_data *data)
> @@ -596,7 +588,7 @@ xfs_zone_gc_space_available(
> if (!oz)
> return false;
> return oz->oz_allocated < rtg_blocks(oz->oz_rtg) &&
> - xfs_zone_gc_scratch_available(data);
> + data->scratch_available;
> }
>
> static void
> @@ -625,8 +617,7 @@ xfs_zone_gc_alloc_blocks(
> if (!oz)
> return NULL;
>
> - *count_fsb = min(*count_fsb,
> - XFS_B_TO_FSB(mp, xfs_zone_gc_scratch_available(data)));
> + *count_fsb = min(*count_fsb, XFS_B_TO_FSB(mp, data->scratch_available));
>
> /*
> * Directly allocate GC blocks from the reserved pool.
> @@ -730,6 +721,7 @@ xfs_zone_gc_start_chunk(
> bio->bi_end_io = xfs_zone_gc_end_io;
> xfs_zone_gc_add_data(chunk);
> data->scratch_head = (data->scratch_head + len) % data->scratch_size;
> + data->scratch_available -= len;
>
> WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
> list_add_tail(&chunk->entry, &data->reading);
> @@ -862,6 +854,7 @@ xfs_zone_gc_finish_chunk(
>
> data->scratch_tail =
> (data->scratch_tail + chunk->len) % data->scratch_size;
> + data->scratch_available += chunk->len;
>
> /*
> * Cycle through the iolock and wait for direct I/O and layouts to
> --
> 2.47.3
>
>
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer
2026-01-28 5:35 ` Darrick J. Wong
@ 2026-01-28 5:52 ` Christoph Hellwig
0 siblings, 0 replies; 11+ messages in thread
From: Christoph Hellwig @ 2026-01-28 5:52 UTC (permalink / raw)
To: Darrick J. Wong
Cc: Christoph Hellwig, Carlos Maiolino, Hans Holmberg, Chris Mason,
Keith Busch, linux-xfs
On Tue, Jan 27, 2026 at 09:35:11PM -0800, Darrick J. Wong wrote:
> > + unsigned int scratch_available;
> > unsigned int scratch_head;
> > unsigned int scratch_tail;
>
> Hrm. I did some digging (because clearly I'm not that good at
> ringbuffer) and came up with this gem from akpm:
>
> "A circular buffer implementation needs only head and tail indices.
> `size' above appears to be redundant.
>
> "Implementation-wise, the head and tail indices should *not* be
> constrained to be less than the size of the buffer. They should be
> allowed to wrap all the way back to zero. This allows you to distinguish
> between the completely-empty and completely-full states while using 100%
> of the storage."
>
> https://lkml.iu.edu/hypermail/linux/kernel/0409.1/2709.html
>
> Can that apply here?
It could, see the version Keith posted. But this one is actually
slightly simpler, while the unsigned overflow version saves 4
bytes of memory per file system (+/- 4 bytes of padding for either
version which I haven't checked) and a single add and sub instruction
each for every GC operation.
My preference is this simpler version, but the other one should work
just fine as well.
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer
2026-01-27 15:10 ` [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer Christoph Hellwig
2026-01-28 5:35 ` Darrick J. Wong
@ 2026-01-28 5:56 ` Christoph Hellwig
2026-01-28 10:09 ` Carlos Maiolino
2026-01-28 12:00 ` Hans Holmberg
2026-01-29 11:32 ` Carlos Maiolino
3 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2026-01-28 5:56 UTC (permalink / raw)
To: Carlos Maiolino
Cc: Hans Holmberg, Darrick J. Wong, Chris Mason, Keith Busch,
linux-xfs
... and I clearly can't spell "separate". Carlos, if this goes
ahead, can you fix it up? Otherwise I'll do it for the next version.
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer
2026-01-28 5:56 ` Christoph Hellwig
@ 2026-01-28 10:09 ` Carlos Maiolino
0 siblings, 0 replies; 11+ messages in thread
From: Carlos Maiolino @ 2026-01-28 10:09 UTC (permalink / raw)
To: Christoph Hellwig
Cc: Hans Holmberg, Darrick J. Wong, Chris Mason, Keith Busch,
linux-xfs
On Wed, Jan 28, 2026 at 06:56:22AM +0100, Christoph Hellwig wrote:
> ... and I clearly can't spell "separate". Carlos, if this goes
> ahead, can you fix it up? Otherwise I'll do it for the next version.
No problem.
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer
2026-01-27 15:10 ` [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer Christoph Hellwig
2026-01-28 5:35 ` Darrick J. Wong
2026-01-28 5:56 ` Christoph Hellwig
@ 2026-01-28 12:00 ` Hans Holmberg
2026-01-29 11:32 ` Carlos Maiolino
3 siblings, 0 replies; 11+ messages in thread
From: Hans Holmberg @ 2026-01-28 12:00 UTC (permalink / raw)
To: hch, Carlos Maiolino
Cc: Darrick J. Wong, Chris Mason, Keith Busch,
linux-xfs@vger.kernel.org
On 27/01/2026 16:10, Christoph Hellwig wrote:
> When scratch_head wraps back to 0 and scratch_tail is also 0 because no
> I/O has completed yet, the ring buffer could be mistaken for empty.
>
> Fix this by introducing a separate scratch_available member in
> struct xfs_zone_gc_data. This actually ends up simplifying the code as
> well.
>
> Reported-by: Chris Mason <clm@meta.com>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> fs/xfs/xfs_zone_gc.c | 25 +++++++++----------------
> 1 file changed, 9 insertions(+), 16 deletions(-)
>
> diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
> index dfa6653210c7..8c08e5519bff 100644
> --- a/fs/xfs/xfs_zone_gc.c
> +++ b/fs/xfs/xfs_zone_gc.c
> @@ -131,10 +131,13 @@ struct xfs_zone_gc_data {
> /*
> * Scratchpad to buffer GC data, organized as a ring buffer over
> * discontiguous folios. scratch_head is where the buffer is filled,
> - * and scratch_tail tracks the buffer space freed.
> + * scratch_tail tracks the buffer space freed, and scratch_available
> + * counts the space available in the ring buffer between the head and
> + * the tail.
> */
> struct folio *scratch_folios[XFS_GC_NR_BUFS];
> unsigned int scratch_size;
> + unsigned int scratch_available;
> unsigned int scratch_head;
> unsigned int scratch_tail;
>
> @@ -212,6 +215,7 @@ xfs_zone_gc_data_alloc(
> goto out_free_scratch;
> }
> data->scratch_size = XFS_GC_BUF_SIZE * XFS_GC_NR_BUFS;
> + data->scratch_available = data->scratch_size;
> INIT_LIST_HEAD(&data->reading);
> INIT_LIST_HEAD(&data->writing);
> INIT_LIST_HEAD(&data->resetting);
> @@ -574,18 +578,6 @@ xfs_zone_gc_ensure_target(
> return oz;
> }
>
> -static unsigned int
> -xfs_zone_gc_scratch_available(
> - struct xfs_zone_gc_data *data)
> -{
> - if (!data->scratch_tail)
> - return data->scratch_size - data->scratch_head;
> -
> - if (!data->scratch_head)
> - return data->scratch_tail;
> - return (data->scratch_size - data->scratch_head) + data->scratch_tail;
> -}
> -
> static bool
> xfs_zone_gc_space_available(
> struct xfs_zone_gc_data *data)
> @@ -596,7 +588,7 @@ xfs_zone_gc_space_available(
> if (!oz)
> return false;
> return oz->oz_allocated < rtg_blocks(oz->oz_rtg) &&
> - xfs_zone_gc_scratch_available(data);
> + data->scratch_available;
> }
>
> static void
> @@ -625,8 +617,7 @@ xfs_zone_gc_alloc_blocks(
> if (!oz)
> return NULL;
>
> - *count_fsb = min(*count_fsb,
> - XFS_B_TO_FSB(mp, xfs_zone_gc_scratch_available(data)));
> + *count_fsb = min(*count_fsb, XFS_B_TO_FSB(mp, data->scratch_available));
>
> /*
> * Directly allocate GC blocks from the reserved pool.
> @@ -730,6 +721,7 @@ xfs_zone_gc_start_chunk(
> bio->bi_end_io = xfs_zone_gc_end_io;
> xfs_zone_gc_add_data(chunk);
> data->scratch_head = (data->scratch_head + len) % data->scratch_size;
> + data->scratch_available -= len;
>
> WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
> list_add_tail(&chunk->entry, &data->reading);
> @@ -862,6 +854,7 @@ xfs_zone_gc_finish_chunk(
>
> data->scratch_tail =
> (data->scratch_tail + chunk->len) % data->scratch_size;
> + data->scratch_available += chunk->len;
>
> /*
> * Cycle through the iolock and wait for direct I/O and layouts to
Nice!
Reviewed-by: Hans Holmberg <hans.holmberg@wdc.com>
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/2] xfs: remove xfs_zone_gc_space_available
2026-01-27 15:10 ` [PATCH 2/2] xfs: remove xfs_zone_gc_space_available Christoph Hellwig
@ 2026-01-28 12:02 ` Hans Holmberg
0 siblings, 0 replies; 11+ messages in thread
From: Hans Holmberg @ 2026-01-28 12:02 UTC (permalink / raw)
To: hch, Carlos Maiolino
Cc: Darrick J. Wong, Chris Mason, Keith Busch,
linux-xfs@vger.kernel.org
On 27/01/2026 16:10, Christoph Hellwig wrote:
> xfs_zone_gc_space_available only has one caller left, so fold it into
> that. Reorder the checks so that the cheaper scratch_available check
> is done first.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> fs/xfs/xfs_zone_gc.c | 21 +++++++--------------
> 1 file changed, 7 insertions(+), 14 deletions(-)
>
> diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
> index 8c08e5519bff..7bdc5043cc1a 100644
> --- a/fs/xfs/xfs_zone_gc.c
> +++ b/fs/xfs/xfs_zone_gc.c
> @@ -578,19 +578,6 @@ xfs_zone_gc_ensure_target(
> return oz;
> }
>
> -static bool
> -xfs_zone_gc_space_available(
> - struct xfs_zone_gc_data *data)
> -{
> - struct xfs_open_zone *oz;
> -
> - oz = xfs_zone_gc_ensure_target(data->mp);
> - if (!oz)
> - return false;
> - return oz->oz_allocated < rtg_blocks(oz->oz_rtg) &&
> - data->scratch_available;
> -}
> -
> static void
> xfs_zone_gc_end_io(
> struct bio *bio)
> @@ -989,9 +976,15 @@ static bool
> xfs_zone_gc_should_start_new_work(
> struct xfs_zone_gc_data *data)
> {
> + struct xfs_open_zone *oz;
> +
> if (xfs_is_shutdown(data->mp))
> return false;
> - if (!xfs_zone_gc_space_available(data))
> + if (!data->scratch_available)
> + return false;
> +
> + oz = xfs_zone_gc_ensure_target(data->mp);
> + if (!oz || oz->oz_allocated == rtg_blocks(oz->oz_rtg))
> return false;
>
> if (!data->iter.victim_rtg) {
Looks good,
Reviewed-by: Hans Holmberg <hans.holmberg@wdc.com>
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer
2026-01-27 15:10 ` [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer Christoph Hellwig
` (2 preceding siblings ...)
2026-01-28 12:00 ` Hans Holmberg
@ 2026-01-29 11:32 ` Carlos Maiolino
3 siblings, 0 replies; 11+ messages in thread
From: Carlos Maiolino @ 2026-01-29 11:32 UTC (permalink / raw)
To: Christoph Hellwig
Cc: Hans Holmberg, Darrick J. Wong, Chris Mason, Keith Busch,
linux-xfs
On Tue, 27 Jan 2026 16:10:20 +0100, Christoph Hellwig wrote:
> When scratch_head wraps back to 0 and scratch_tail is also 0 because no
> I/O has completed yet, the ring buffer could be mistaken for empty.
>
> Fix this by introducing a separate scratch_available member in
> struct xfs_zone_gc_data. This actually ends up simplifying the code as
> well.
>
> [...]
Applied to for-next, thanks!
[1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer
commit: c17a1c03493bee4e7882ac79a52b8150cb464e56
[2/2] xfs: remove xfs_zone_gc_space_available
commit: 7da4ebea8332e6b2fb15edc71e5443c15826af49
Best regards,
--
Carlos Maiolino <cem@kernel.org>
^ permalink raw reply [flat|nested] 11+ messages in thread
end of thread, other threads:[~2026-01-29 11:32 UTC | newest]
Thread overview: 11+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-01-27 15:10 zone GC ringbuffer fix and cleanup Christoph Hellwig
2026-01-27 15:10 ` [PATCH 1/2] xfs: use a seprate member to track space availabe in the GC scatch buffer Christoph Hellwig
2026-01-28 5:35 ` Darrick J. Wong
2026-01-28 5:52 ` Christoph Hellwig
2026-01-28 5:56 ` Christoph Hellwig
2026-01-28 10:09 ` Carlos Maiolino
2026-01-28 12:00 ` Hans Holmberg
2026-01-29 11:32 ` Carlos Maiolino
2026-01-27 15:10 ` [PATCH 2/2] xfs: remove xfs_zone_gc_space_available Christoph Hellwig
2026-01-28 12:02 ` Hans Holmberg
2026-01-27 15:25 ` zone GC ringbuffer fix and cleanup Keith Busch
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox