* [PATCH] btrfs: extract the parity scrub code into a helper
@ 2025-11-06 3:02 Qu Wenruo
2025-11-06 9:02 ` David Sterba
0 siblings, 1 reply; 3+ messages in thread
From: Qu Wenruo @ 2025-11-06 3:02 UTC (permalink / raw)
To: linux-btrfs
The function scrub_raid56_parity_stripe() is handling the parity stripe
by the following steps:
- Scrub each data stripe
And make sure everything is fine in each data stripe
- Cache the data stripe into the raid bio
- Use the cached raid bio to scrub the target parity stripe
Extract the last two steps into a new helper,
scrub_raid56_cached_parity(), as a cleanup and make the error handling
more straightforward.
With the following minor cleanups:
- Use on-stack bio structure
The bio is always empty thus we do not need any bio vector nor the
block device. Thus there is no need to allocate a bio, the on-stack
one is more than enough to cut it.
- Remove the unnecessary btrfs_put_bioc() call if btrfs_map_block()
failed
If btrfs_map_block() fails, @bioc_ret will not be touched, thus
there is no need to call btrfs_put_bioc() in this case.
- Use a proper out: tag to do the cleanup
Now the error cleanup is much shorter and simpler, just
btrfs_bio_counter_dec() and bio_uninit().
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
fs/btrfs/scrub.c | 90 ++++++++++++++++++++++++++++--------------------
1 file changed, 52 insertions(+), 38 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index e3612202ba55..8c360d941bd5 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2113,6 +2113,56 @@ static int should_cancel_scrub(const struct scrub_ctx *sctx)
return 0;
}
+static int scrub_raid56_cached_parity(struct scrub_ctx *sctx,
+ struct btrfs_device *scrub_dev,
+ struct btrfs_chunk_map *map,
+ u64 full_stripe_start,
+ unsigned long *extent_bitmap)
+{
+ DECLARE_COMPLETION_ONSTACK(io_done);
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct btrfs_io_context *bioc = NULL;
+ struct btrfs_raid_bio *rbio;
+ struct bio bio;
+ const int data_stripes = nr_data_stripes(map);
+ u64 length = btrfs_stripe_nr_to_offset(data_stripes);
+ int ret;
+
+ bio_init(&bio, NULL, NULL, 0, REQ_OP_READ);
+ bio.bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
+ bio.bi_private = &io_done;
+ bio.bi_end_io = raid56_scrub_wait_endio;
+
+ btrfs_bio_counter_inc_blocked(fs_info);
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
+ &length, &bioc, NULL, NULL);
+ if (ret < 0)
+ goto out;
+ /* For RAID56 write there must be an @bioc allocated. */
+ ASSERT(bioc);
+ rbio = raid56_parity_alloc_scrub_rbio(&bio, bioc, scrub_dev, extent_bitmap,
+ BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
+ btrfs_put_bioc(bioc);
+ if (!rbio) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Use the recovered stripes as cache to avoid read them from disk again. */
+ for (int i = 0; i < data_stripes; i++) {
+ struct scrub_stripe *stripe = &sctx->raid56_data_stripes[i];
+
+ raid56_parity_cache_data_folios(rbio, stripe->folios,
+ full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
+ }
+ raid56_parity_submit_scrub_rbio(rbio);
+ wait_for_completion_io(&io_done);
+ ret = blk_status_to_errno(bio.bi_status);
+out:
+ btrfs_bio_counter_dec(fs_info);
+ bio_uninit(&bio);
+ return ret;
+}
+
static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
struct btrfs_device *scrub_dev,
struct btrfs_block_group *bg,
@@ -2121,16 +2171,12 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
{
DECLARE_COMPLETION_ONSTACK(io_done);
struct btrfs_fs_info *fs_info = sctx->fs_info;
- struct btrfs_raid_bio *rbio;
- struct btrfs_io_context *bioc = NULL;
struct btrfs_path extent_path = { 0 };
struct btrfs_path csum_path = { 0 };
- struct bio *bio;
struct scrub_stripe *stripe;
bool all_empty = true;
const int data_stripes = nr_data_stripes(map);
unsigned long extent_bitmap = 0;
- u64 length = btrfs_stripe_nr_to_offset(data_stripes);
int ret;
ASSERT(sctx->raid56_data_stripes);
@@ -2252,40 +2298,8 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
}
/* Now we can check and regenerate the P/Q stripe. */
- bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
- bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
- bio->bi_private = &io_done;
- bio->bi_end_io = raid56_scrub_wait_endio;
-
- btrfs_bio_counter_inc_blocked(fs_info);
- ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
- &length, &bioc, NULL, NULL);
- if (ret < 0) {
- btrfs_put_bioc(bioc);
- btrfs_bio_counter_dec(fs_info);
- goto out;
- }
- rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
- BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
- btrfs_put_bioc(bioc);
- if (!rbio) {
- ret = -ENOMEM;
- btrfs_bio_counter_dec(fs_info);
- goto out;
- }
- /* Use the recovered stripes as cache to avoid read them from disk again. */
- for (int i = 0; i < data_stripes; i++) {
- stripe = &sctx->raid56_data_stripes[i];
-
- raid56_parity_cache_data_folios(rbio, stripe->folios,
- full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
- }
- raid56_parity_submit_scrub_rbio(rbio);
- wait_for_completion_io(&io_done);
- ret = blk_status_to_errno(bio->bi_status);
- bio_put(bio);
- btrfs_bio_counter_dec(fs_info);
-
+ ret = scrub_raid56_cached_parity(sctx, scrub_dev, map, full_stripe_start,
+ &extent_bitmap);
out:
btrfs_release_path(&extent_path);
btrfs_release_path(&csum_path);
--
2.51.2
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH] btrfs: extract the parity scrub code into a helper
2025-11-06 3:02 [PATCH] btrfs: extract the parity scrub code into a helper Qu Wenruo
@ 2025-11-06 9:02 ` David Sterba
2025-11-06 9:24 ` Qu Wenruo
0 siblings, 1 reply; 3+ messages in thread
From: David Sterba @ 2025-11-06 9:02 UTC (permalink / raw)
To: Qu Wenruo; +Cc: linux-btrfs
On Thu, Nov 06, 2025 at 01:32:05PM +1030, Qu Wenruo wrote:
> The function scrub_raid56_parity_stripe() is handling the partity stripe
> by the following steps:
>
> - Scrub each data stripes
> And make sure everything is fine in each data stripe
>
> - Cache the data stripe into the raid bio
>
> - Use the cached raid bio to scrub the target parity stripe
>
> Extract the last two steps into a new helper,
> scrub_radi56_cached_parity(), as a cleanup and make the error handling
> more straightforward.
>
> With the following minor cleanups:
>
> - Use on-stack bio structure
> The bio is always empty thus we do not need any bio vector nor the
> block device. Thus there is no need to allocate a bio, the on-stack
> one is more than enough to cut it.
>
> - Remove the unnecessary btrfs_put_bioc() call if btrfs_map_block()
> failed
> If btrfs_map_block() is failed, @bioc_ret will not be touched thus
> there is no need to call btrfs_put_bioc() in this case.
>
> - Use a proper out: tag to do the cleanup
> Now the error cleanup is much shorter and simpler, just
> btrfs_bio_counter_dec() and bio_uninit().
>
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> ---
> fs/btrfs/scrub.c | 90 ++++++++++++++++++++++++++++--------------------
> 1 file changed, 52 insertions(+), 38 deletions(-)
>
> diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
> index e3612202ba55..8c360d941bd5 100644
> --- a/fs/btrfs/scrub.c
> +++ b/fs/btrfs/scrub.c
> @@ -2113,6 +2113,56 @@ static int should_cancel_scrub(const struct scrub_ctx *sctx)
> return 0;
> }
>
> +static int scrub_raid56_cached_parity(struct scrub_ctx *sctx,
> + struct btrfs_device *scrub_dev,
> + struct btrfs_chunk_map *map,
> + u64 full_stripe_start,
> + unsigned long *extent_bitmap)
> +{
> + DECLARE_COMPLETION_ONSTACK(io_done);
> + struct btrfs_fs_info *fs_info = sctx->fs_info;
> + struct btrfs_io_context *bioc = NULL;
> + struct btrfs_raid_bio *rbio;
> + struct bio bio;
> + const int data_stripes = nr_data_stripes(map);
> + u64 length = btrfs_stripe_nr_to_offset(data_stripes);
> + int ret;
> +
> + bio_init(&bio, NULL, NULL, 0, REQ_OP_READ);
> + bio.bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
> + bio.bi_private = &io_done;
> + bio.bi_end_io = raid56_scrub_wait_endio;
> +
> + btrfs_bio_counter_inc_blocked(fs_info);
> + ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
> + &length, &bioc, NULL, NULL);
> + if (ret < 0)
> + goto out;
> + /* For RAID56 write there must be an @bioc allocated. */
> + ASSERT(bioc);
> + rbio = raid56_parity_alloc_scrub_rbio(&bio, bioc, scrub_dev, extent_bitmap,
> + BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
> + btrfs_put_bioc(bioc);
> + if (!rbio) {
> + ret = -ENOMEM;
> + goto out;
> + }
> + /* Use the recovered stripes as cache to avoid read them from disk again. */
> + for (int i = 0; i < data_stripes; i++) {
> + struct scrub_stripe *stripe = &sctx->raid56_data_stripes[i];
> +
> + raid56_parity_cache_data_folios(rbio, stripe->folios,
> + full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
> + }
> + raid56_parity_submit_scrub_rbio(rbio);
> + wait_for_completion_io(&io_done);
> + ret = blk_status_to_errno(bio.bi_status);
> +out:
> + btrfs_bio_counter_dec(fs_info);
> + bio_uninit(&bio);
> + return ret;
> +}
> +
> static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
> struct btrfs_device *scrub_dev,
> struct btrfs_block_group *bg,
> @@ -2121,16 +2171,12 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
> {
> DECLARE_COMPLETION_ONSTACK(io_done);
This should be deleted as well, as it's in scrub_raid56_cached_parity()
The stack meter says that the new function adds 240 bytes (and has
dynamic stack size) while scrub_raid56_parity_stripe() shrinks only by
24 bytes. So this basically adds 240 - 24 = 216 bytes to the stack.
With the completion removed, that is another -32 bytes, bringing it down to 184. The
on-stack bio is 112 bytes from that, 184 - 112 = 72 for remaining
variables.
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] btrfs: extract the parity scrub code into a helper
2025-11-06 9:02 ` David Sterba
@ 2025-11-06 9:24 ` Qu Wenruo
0 siblings, 0 replies; 3+ messages in thread
From: Qu Wenruo @ 2025-11-06 9:24 UTC (permalink / raw)
To: dsterba; +Cc: linux-btrfs
在 2025/11/6 19:32, David Sterba 写道:
> On Thu, Nov 06, 2025 at 01:32:05PM +1030, Qu Wenruo wrote:
>> The function scrub_raid56_parity_stripe() is handling the partity stripe
>> by the following steps:
>>
>> - Scrub each data stripes
>> And make sure everything is fine in each data stripe
>>
>> - Cache the data stripe into the raid bio
>>
>> - Use the cached raid bio to scrub the target parity stripe
>>
>> Extract the last two steps into a new helper,
>> scrub_radi56_cached_parity(), as a cleanup and make the error handling
>> more straightforward.
>>
>> With the following minor cleanups:
>>
>> - Use on-stack bio structure
>> The bio is always empty thus we do not need any bio vector nor the
>> block device. Thus there is no need to allocate a bio, the on-stack
>> one is more than enough to cut it.
>>
>> - Remove the unnecessary btrfs_put_bioc() call if btrfs_map_block()
>> failed
>> If btrfs_map_block() is failed, @bioc_ret will not be touched thus
>> there is no need to call btrfs_put_bioc() in this case.
>>
>> - Use a proper out: tag to do the cleanup
>> Now the error cleanup is much shorter and simpler, just
>> btrfs_bio_counter_dec() and bio_uninit().
>>
>> Signed-off-by: Qu Wenruo <wqu@suse.com>
>> ---
>> fs/btrfs/scrub.c | 90 ++++++++++++++++++++++++++++--------------------
>> 1 file changed, 52 insertions(+), 38 deletions(-)
>>
>> diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
>> index e3612202ba55..8c360d941bd5 100644
>> --- a/fs/btrfs/scrub.c
>> +++ b/fs/btrfs/scrub.c
>> @@ -2113,6 +2113,56 @@ static int should_cancel_scrub(const struct scrub_ctx *sctx)
>> return 0;
>> }
>>
>> +static int scrub_raid56_cached_parity(struct scrub_ctx *sctx,
>> + struct btrfs_device *scrub_dev,
>> + struct btrfs_chunk_map *map,
>> + u64 full_stripe_start,
>> + unsigned long *extent_bitmap)
>> +{
>> + DECLARE_COMPLETION_ONSTACK(io_done);
>> + struct btrfs_fs_info *fs_info = sctx->fs_info;
>> + struct btrfs_io_context *bioc = NULL;
>> + struct btrfs_raid_bio *rbio;
>> + struct bio bio;
>> + const int data_stripes = nr_data_stripes(map);
>> + u64 length = btrfs_stripe_nr_to_offset(data_stripes);
>> + int ret;
>> +
>> + bio_init(&bio, NULL, NULL, 0, REQ_OP_READ);
>> + bio.bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
>> + bio.bi_private = &io_done;
>> + bio.bi_end_io = raid56_scrub_wait_endio;
>> +
>> + btrfs_bio_counter_inc_blocked(fs_info);
>> + ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
>> + &length, &bioc, NULL, NULL);
>> + if (ret < 0)
>> + goto out;
>> + /* For RAID56 write there must be an @bioc allocated. */
>> + ASSERT(bioc);
>> + rbio = raid56_parity_alloc_scrub_rbio(&bio, bioc, scrub_dev, extent_bitmap,
>> + BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
>> + btrfs_put_bioc(bioc);
>> + if (!rbio) {
>> + ret = -ENOMEM;
>> + goto out;
>> + }
>> + /* Use the recovered stripes as cache to avoid read them from disk again. */
>> + for (int i = 0; i < data_stripes; i++) {
>> + struct scrub_stripe *stripe = &sctx->raid56_data_stripes[i];
>> +
>> + raid56_parity_cache_data_folios(rbio, stripe->folios,
>> + full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
>> + }
>> + raid56_parity_submit_scrub_rbio(rbio);
>> + wait_for_completion_io(&io_done);
>> + ret = blk_status_to_errno(bio.bi_status);
>> +out:
>> + btrfs_bio_counter_dec(fs_info);
>> + bio_uninit(&bio);
>> + return ret;
>> +}
>> +
>> static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
>> struct btrfs_device *scrub_dev,
>> struct btrfs_block_group *bg,
>> @@ -2121,16 +2171,12 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
>> {
>> DECLARE_COMPLETION_ONSTACK(io_done);
>
> This should be deleted as well, as it's in scrub_raid56_cached_parity()
Right, will sent out a v2 just in case.
As I mostly rely on b4 to handle the tags, only updating it locally
won't make it persistent here.
Thanks,
Qu
>
> The stack meter says that the new function adds 240 bytes (and has
> dynamic stack size) while scrub_raid56_parity_stripe() shrinks only by
> 24 bytes. So this basically adds 240 - 24 = 216 bytes to the stack.
>
> With the completion removed is another -32 bytes it's down to 184. The
> on-stack bio is 112 bytes from that, 184 - 112 = 72 for remaining
> variables.
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2025-11-06 9:24 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-11-06 3:02 [PATCH] btrfs: extract the parity scrub code into a helper Qu Wenruo
2025-11-06 9:02 ` David Sterba
2025-11-06 9:24 ` Qu Wenruo
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox