* [bug report] btrfs: introduce dedicated helper to scrub simple-mirror based range
@ 2022-01-26 14:23 Dan Carpenter
From: Dan Carpenter @ 2022-01-26 14:23 UTC
To: wqu; +Cc: linux-btrfs
Hello Qu Wenruo,
The patch b5b99b1e0296: "btrfs: introduce dedicated helper to scrub
simple-mirror based range" from Jan 7, 2022, leads to the following
Smatch static checker warning:
fs/btrfs/scrub.c:3678 scrub_stripe() error: uninitialized symbol 'offset'.
fs/btrfs/scrub.c:3680 scrub_stripe() error: uninitialized symbol 'physical_end'.
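For reference, warnings like this can usually be reproduced with Smatch's kchecker wrapper, run from the top of a kernel tree, along these lines (assuming a local Smatch build; the checkout path here is just an example):

	~/smatch/smatch_scripts/kchecker fs/btrfs/scrub.c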
fs/btrfs/scrub.c
3513 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3514 struct btrfs_block_group *bg,
3515 struct map_lookup *map,
3516 struct btrfs_device *scrub_dev,
3517 int stripe_index, u64 dev_extent_len)
3518 {
3519 struct btrfs_path *path;
3520 struct btrfs_fs_info *fs_info = sctx->fs_info;
3521 struct btrfs_root *root;
3522 struct btrfs_root *csum_root;
3523 struct blk_plug plug;
3524 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
3525 const u64 chunk_logical = bg->start;
3526 int ret;
3527 u64 nstripes;
3528 u64 physical;
3529 u64 logical;
3530 u64 logic_end;
3531 u64 physical_end;
3532 u64 increment; /* The logical increment after finishing one stripe */
3533 u64 offset; /* Offset inside the chunk */
3534 u64 stripe_logical;
3535 u64 stripe_end;
3536 int stop_loop = 0;
3537
3538 path = btrfs_alloc_path();
3539 if (!path)
3540 return -ENOMEM;
3541
3542 /*
3543 * Work on commit root. The related disk blocks are static as
3544 * long as COW is applied. This means it is safe to rewrite
3545 * them to repair disk errors without any race conditions
3546 */
3547 path->search_commit_root = 1;
3548 path->skip_locking = 1;
3549 path->reada = READA_FORWARD;
3550
3551 wait_event(sctx->list_wait,
3552 atomic_read(&sctx->bios_in_flight) == 0);
3553 scrub_blocked_if_needed(fs_info);
3554
3555 root = btrfs_extent_root(fs_info, bg->start);
3556 csum_root = btrfs_csum_root(fs_info, bg->start);
3557
3558 /*
3559 * Collect all data csums for the stripe to avoid seeking during
3560 * the scrub. This might currently (crc32) end up being about 1MB
3561 */
3562 blk_start_plug(&plug);
3563
3564 if (sctx->is_dev_replace &&
3565 btrfs_dev_is_sequential(sctx->wr_tgtdev,
3566 map->stripes[stripe_index].physical)) {
3567 mutex_lock(&sctx->wr_lock);
3568 sctx->write_pointer = map->stripes[stripe_index].physical;
3569 mutex_unlock(&sctx->wr_lock);
3570 sctx->flush_all_writes = true;
3571 }
3572
3573 /*
3574 * There used to be a big double loop to handle all profiles using the
3575 * same routine, which grew larger and messier over time.
3576 *
3577 * So here we handle each profile separately, so that simpler profiles
3578 * have simpler scrubbing functions.
3579 */
3580 if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
3581 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
3582 /*
3583 * The above check rules out all complex profiles; the remaining
3584 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
3585 * mirrored duplication without striping.
3586 *
3587 * Only @physical and @mirror_num need to be calculated using
3588 * @stripe_index.
3589 */
3590 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
3591 bg->start, bg->length, scrub_dev,
3592 map->stripes[stripe_index].physical,
3593 stripe_index + 1);
3594 goto out;
^^^^^^^^^
3595 }
3596 if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3597 ret = scrub_simple_stripe(sctx, root, csum_root, bg, map,
3598 scrub_dev, stripe_index);
3599 goto out;
^^^^^^^^^
At this point "offset" and "physical_end" are not initialized.
3600 }
3601
3602 /* Only RAID56 goes through the old code */
3603 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
3604
3605 physical = map->stripes[stripe_index].physical;
3606 offset = 0;
3607 nstripes = div64_u64(dev_extent_len, map->stripe_len);
3608 get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
3609 increment = map->stripe_len * nr_data_stripes(map);
3610
3611 logical = chunk_logical + offset;
3612 physical_end = physical + nstripes * map->stripe_len;
3613 get_raid56_logic_offset(physical_end, stripe_index, map, &logic_end,
3614 NULL);
3615 logic_end += chunk_logical;
3616
3617 ret = 0;
3618 /*
3619 * Due to the rotation, for RAID56 it's better to iterate each stripe
3620 * using its physical offset.
3621 */
3622 while (physical < physical_end) {
3623 ret = get_raid56_logic_offset(physical, stripe_index, map,
3624 &logical, &stripe_logical);
3625 logical += chunk_logical;
3626 if (ret) {
3627 /* It is a parity stripe */
3628 stripe_logical += chunk_logical;
3629 stripe_end = stripe_logical + increment;
3630 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3631 stripe_logical,
3632 stripe_end);
3633 if (ret)
3634 goto out;
3635 goto next;
3636 }
3637
3638 /*
3639 * Now we're at the data stripes, scrub each extent in the range.
3640 *
3641 * At this stage, if we ignore the repair part, each data stripe
3642 * is no different from the SINGLE profile.
3643 * We can reuse scrub_simple_mirror() here, as the repair part
3644 * is still based on @mirror_num.
3645 */
3646 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
3647 logical, map->stripe_len,
3648 scrub_dev, physical, 1);
3649 if (ret < 0)
3650 goto out;
3651 next:
3652 logical += increment;
3653 physical += map->stripe_len;
3654 spin_lock(&sctx->stat_lock);
3655 if (stop_loop)
3656 sctx->stat.last_physical = map->stripes[stripe_index].physical +
3657 dev_extent_len;
3658 else
3659 sctx->stat.last_physical = physical;
3660 spin_unlock(&sctx->stat_lock);
3661 if (stop_loop)
3662 break;
3663 }
3664 out:
3665 /* push queued extents */
3666 scrub_submit(sctx);
3667 mutex_lock(&sctx->wr_lock);
3668 scrub_wr_submit(sctx);
3669 mutex_unlock(&sctx->wr_lock);
3670
3671 blk_finish_plug(&plug);
3672 btrfs_free_path(path);
3673
3674 if (sctx->is_dev_replace && ret >= 0) {
3675 int ret2;
3676
3677 ret2 = sync_write_pointer_for_zoned(sctx,
--> 3678 chunk_logical + offset,
^^^^^^
3679 map->stripes[stripe_index].physical,
3680 physical_end);
^^^^^^^^^^^^
3681 if (ret2)
3682 ret = ret2;
3683 }
3684
3685 return ret < 0 ? ret : 0;
3686 }
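For what it's worth, a minimal way to silence the warning would be to initialize both variables at declaration, so that the zoned dev-replace sync at the bottom always sees defined values. Untested sketch; whether zero is actually a sensible value to feed sync_write_pointer_for_zoned() on the simple-mirror/simple-stripe paths is your call:

	 	u64 logic_end;
	-	u64 physical_end;
	+	u64 physical_end = 0;
	 	u64 increment;	/* The logical increment after finishing one stripe */
	-	u64 offset;	/* Offset inside the chunk */
	+	u64 offset = 0;	/* Offset inside the chunk */
	 	u64 stripe_logical;

That at least keeps the simple paths from passing stack garbage to sync_write_pointer_for_zoned() in the is_dev_replace case.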
regards,
dan carpenter
* [bug report] btrfs: introduce dedicated helper to scrub simple-mirror based range
@ 2022-01-27 8:52 Dan Carpenter
From: Dan Carpenter @ 2022-01-27 8:52 UTC
To: wqu; +Cc: linux-btrfs
Hello Qu Wenruo,
The patch b5b99b1e0296: "btrfs: introduce dedicated helper to scrub
simple-mirror based range" from Jan 7, 2022, leads to the following
Smatch static checker warning:
fs/btrfs/scrub.c:3678 scrub_stripe()
error: uninitialized symbol 'offset'.
fs/btrfs/scrub.c
3513 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3514 struct btrfs_block_group *bg,
3515 struct map_lookup *map,
3516 struct btrfs_device *scrub_dev,
3517 int stripe_index, u64 dev_extent_len)
3518 {
3519 struct btrfs_path *path;
3520 struct btrfs_fs_info *fs_info = sctx->fs_info;
3521 struct btrfs_root *root;
3522 struct btrfs_root *csum_root;
3523 struct blk_plug plug;
3524 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
3525 const u64 chunk_logical = bg->start;
3526 int ret;
3527 u64 nstripes;
3528 u64 physical;
3529 u64 logical;
3530 u64 logic_end;
3531 u64 physical_end;
3532 u64 increment; /* The logical increment after finishing one stripe */
3533 u64 offset; /* Offset inside the chunk */
^^^^^^^^^^
3534 u64 stripe_logical;
3535 u64 stripe_end;
3536 int stop_loop = 0;
3537
3538 path = btrfs_alloc_path();
3539 if (!path)
3540 return -ENOMEM;
3541
3542 /*
3543 * Work on commit root. The related disk blocks are static as
3544 * long as COW is applied. This means it is safe to rewrite
3545 * them to repair disk errors without any race conditions
3546 */
3547 path->search_commit_root = 1;
3548 path->skip_locking = 1;
3549 path->reada = READA_FORWARD;
3550
3551 wait_event(sctx->list_wait,
3552 atomic_read(&sctx->bios_in_flight) == 0);
3553 scrub_blocked_if_needed(fs_info);
3554
3555 root = btrfs_extent_root(fs_info, bg->start);
3556 csum_root = btrfs_csum_root(fs_info, bg->start);
3557
3558 /*
3559 * Collect all data csums for the stripe to avoid seeking during
3560 * the scrub. This might currently (crc32) end up being about 1MB
3561 */
3562 blk_start_plug(&plug);
3563
3564 if (sctx->is_dev_replace &&
3565 btrfs_dev_is_sequential(sctx->wr_tgtdev,
3566 map->stripes[stripe_index].physical)) {
3567 mutex_lock(&sctx->wr_lock);
3568 sctx->write_pointer = map->stripes[stripe_index].physical;
3569 mutex_unlock(&sctx->wr_lock);
3570 sctx->flush_all_writes = true;
3571 }
3572
3573 /*
3574 * There used to be a big double loop to handle all profiles using the
3575 * same routine, which grew larger and messier over time.
3576 *
3577 * So here we handle each profile separately, so that simpler profiles
3578 * have simpler scrubbing functions.
3579 */
3580 if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
3581 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
3582 /*
3583 * The above check rules out all complex profiles; the remaining
3584 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
3585 * mirrored duplication without striping.
3586 *
3587 * Only @physical and @mirror_num need to be calculated using
3588 * @stripe_index.
3589 */
3590 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
3591 bg->start, bg->length, scrub_dev,
3592 map->stripes[stripe_index].physical,
3593 stripe_index + 1);
3594 goto out;
^^^^^^^^^
3595 }
3596 if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3597 ret = scrub_simple_stripe(sctx, root, csum_root, bg, map,
3598 scrub_dev, stripe_index);
3599 goto out;
^^^^^^^^^
These two gotos happen before "offset" has been set to zero.
3600 }
3601
3602 /* Only RAID56 goes through the old code */
3603 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
3604
3605 physical = map->stripes[stripe_index].physical;
3606 offset = 0;
3607 nstripes = div64_u64(dev_extent_len, map->stripe_len);
3608 get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
3609 increment = map->stripe_len * nr_data_stripes(map);
3610
3611 logical = chunk_logical + offset;
3612 physical_end = physical + nstripes * map->stripe_len;
3613 get_raid56_logic_offset(physical_end, stripe_index, map, &logic_end,
3614 NULL);
3615 logic_end += chunk_logical;
3616
3617 ret = 0;
3618 /*
3619 * Due to the rotation, for RAID56 it's better to iterate each stripe
3620 * using its physical offset.
3621 */
3622 while (physical < physical_end) {
3623 ret = get_raid56_logic_offset(physical, stripe_index, map,
3624 &logical, &stripe_logical);
3625 logical += chunk_logical;
3626 if (ret) {
3627 /* It is a parity stripe */
3628 stripe_logical += chunk_logical;
3629 stripe_end = stripe_logical + increment;
3630 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3631 stripe_logical,
3632 stripe_end);
3633 if (ret)
3634 goto out;
3635 goto next;
3636 }
3637
3638 /*
3639 * Now we're at the data stripes, scrub each extent in the range.
3640 *
3641 * At this stage, if we ignore the repair part, each data stripe
3642 * is no different from the SINGLE profile.
3643 * We can reuse scrub_simple_mirror() here, as the repair part
3644 * is still based on @mirror_num.
3645 */
3646 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
3647 logical, map->stripe_len,
3648 scrub_dev, physical, 1);
3649 if (ret < 0)
3650 goto out;
3651 next:
3652 logical += increment;
3653 physical += map->stripe_len;
3654 spin_lock(&sctx->stat_lock);
3655 if (stop_loop)
3656 sctx->stat.last_physical = map->stripes[stripe_index].physical +
3657 dev_extent_len;
3658 else
3659 sctx->stat.last_physical = physical;
3660 spin_unlock(&sctx->stat_lock);
3661 if (stop_loop)
3662 break;
3663 }
3664 out:
3665 /* push queued extents */
3666 scrub_submit(sctx);
3667 mutex_lock(&sctx->wr_lock);
3668 scrub_wr_submit(sctx);
3669 mutex_unlock(&sctx->wr_lock);
3670
3671 blk_finish_plug(&plug);
3672 btrfs_free_path(path);
3673
3674 if (sctx->is_dev_replace && ret >= 0) {
3675 int ret2;
3676
3677 ret2 = sync_write_pointer_for_zoned(sctx,
--> 3678 chunk_logical + offset,
3679 map->stripes[stripe_index].physical,
3680 physical_end);
3681 if (ret2)
3682 ret = ret2;
3683 }
3684
3685 return ret < 0 ? ret : 0;
3686 }
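Another (also untested) option would be to do the write-pointer sync only on the RAID56 path, which is the only path that reaches the bottom of the function with "offset" and "physical_end" computed. That assumes the simple-mirror and simple-stripe helpers either don't need the sync or would grow their own:

	-	if (sctx->is_dev_replace && ret >= 0) {
	+	if (sctx->is_dev_replace && ret >= 0 &&
	+	    (profile & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
	 		int ret2;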
regards,
dan carpenter