From: "Darrick J. Wong" <darrick.wong@oracle.com>
To: darrick.wong@oracle.com
Cc: linux-xfs@vger.kernel.org, Dave Chinner <dchinner@redhat.com>
Subject: [PATCH 4/8] xfs: hoist inode cluster checks out of loop
Date: Mon, 31 Dec 2018 18:08:52 -0800
Message-ID: <154630853205.14372.8548651680958822032.stgit@magnolia>
In-Reply-To: <154630850739.14372.14000129432637481206.stgit@magnolia>

From: Darrick J. Wong <darrick.wong@oracle.com>

Hoist the inode cluster checks out of the inobt record check loop into
a separate function in preparation for refactoring of that loop. No
functional changes here; that's in the next patch.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
---
fs/xfs/scrub/ialloc.c | 119 +++++++++++++++++++++++++++----------------------
 1 file changed, 65 insertions(+), 54 deletions(-)

diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 5e593e4292b2..9af01ea0258d 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -188,19 +188,19 @@ xchk_iallocbt_check_cluster_freemask(
return 0;
}
-/* Make sure the free mask is consistent with what the inodes think. */
+/* Check an inode cluster. */
STATIC int
-xchk_iallocbt_check_freemask(
+xchk_iallocbt_check_cluster(
struct xchk_btree *bs,
- struct xfs_inobt_rec_incore *irec)
+ struct xfs_inobt_rec_incore *irec,
+ xfs_agino_t agino)
{
struct xfs_imap imap;
struct xfs_mount *mp = bs->cur->bc_mp;
struct xfs_dinode *dip;
struct xfs_buf *bp;
xfs_ino_t fsino;
- xfs_agino_t nr_inodes;
- xfs_agino_t agino;
+ unsigned int nr_inodes;
xfs_agino_t chunkino;
xfs_agino_t clusterino;
xfs_agblock_t agbno;
@@ -212,60 +212,71 @@ xchk_iallocbt_check_freemask(
nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
mp->m_inodes_per_cluster);
- for (agino = irec->ir_startino;
- agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
- agino += mp->m_inodes_per_cluster) {
- fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
- chunkino = agino - irec->ir_startino;
- agbno = XFS_AGINO_TO_AGBNO(mp, agino);
-
- /* Compute the holemask mask for this cluster. */
- for (clusterino = 0, holemask = 0; clusterino < nr_inodes;
- clusterino += XFS_INODES_PER_HOLEMASK_BIT)
- holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
- XFS_INODES_PER_HOLEMASK_BIT);
-
- /* The whole cluster must be a hole or not a hole. */
- ir_holemask = (irec->ir_holemask & holemask);
- if (ir_holemask != holemask && ir_holemask != 0) {
- xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
- continue;
- }
+ fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
+ chunkino = agino - irec->ir_startino;
+ agbno = XFS_AGINO_TO_AGBNO(mp, agino);
- /* If any part of this is a hole, skip it. */
- if (ir_holemask) {
- xchk_xref_is_not_owned_by(bs->sc, agbno,
- mp->m_blocks_per_cluster,
- &XFS_RMAP_OINFO_INODES);
- continue;
- }
+ /* Compute the holemask mask for this cluster. */
+ for (clusterino = 0, holemask = 0; clusterino < nr_inodes;
+ clusterino += XFS_INODES_PER_HOLEMASK_BIT)
+ holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
+ XFS_INODES_PER_HOLEMASK_BIT);
- xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
+ /* The whole cluster must be a hole or not a hole. */
+ ir_holemask = (irec->ir_holemask & holemask);
+ if (ir_holemask != holemask && ir_holemask != 0) {
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ return 0;
+ }
+
+ /* If any part of this is a hole, skip it. */
+ if (ir_holemask) {
+ xchk_xref_is_not_owned_by(bs->sc, agbno,
+ mp->m_blocks_per_cluster,
&XFS_RMAP_OINFO_INODES);
+ return 0;
+ }
- /* Grab the inode cluster buffer. */
- imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno,
- agbno);
- imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
- imap.im_boffset = 0;
-
- error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
- &dip, &bp, 0, 0);
- if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0,
- &error))
- continue;
-
- /* Which inodes are free? */
- for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
- error = xchk_iallocbt_check_cluster_freemask(bs,
- fsino, chunkino, clusterino, irec, bp);
- if (error) {
- xfs_trans_brelse(bs->cur->bc_tp, bp);
- return error;
- }
- }
+ xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
+ &XFS_RMAP_OINFO_INODES);
+
+ /* Grab the inode cluster buffer. */
+ imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno, agbno);
+ imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
+ imap.im_boffset = 0;
- xfs_trans_brelse(bs->cur->bc_tp, bp);
+ error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &bp, 0, 0);
+ if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
+ return 0;
+
+ /* Which inodes are free? */
+ for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
+ error = xchk_iallocbt_check_cluster_freemask(bs, fsino,
+ chunkino, clusterino, irec, bp);
+ if (error)
+ break;
+ }
+
+ xfs_trans_brelse(bs->cur->bc_tp, bp);
+ return error;
+}
+
+/* Make sure the free mask is consistent with what the inodes think. */
+STATIC int
+xchk_iallocbt_check_freemask(
+ struct xchk_btree *bs,
+ struct xfs_inobt_rec_incore *irec)
+{
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ xfs_agino_t agino;
+ int error = 0;
+
+ for (agino = irec->ir_startino;
+ agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
+ agino += mp->m_blocks_per_cluster * mp->m_sb.sb_inopblock) {
+ error = xchk_iallocbt_check_cluster(bs, irec, agino);
+ if (error)
+ break;
}
return error;
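
For readers who want the shape of the change without walking the diff, here is a
minimal standalone sketch of the resulting structure. All names, constants, and
the printf body below are placeholders for illustration only; the real checks
live in xchk_iallocbt_check_cluster() and xchk_iallocbt_check_freemask() as
shown in the diff above.

/*
 * Standalone illustration of the loop-body hoist (placeholder types and
 * constants, not the kernel code).  The per-cluster work that used to sit
 * inline in the chunk loop now lives in its own function, and the loop
 * just walks the chunk one cluster at a time, stopping on the first error.
 */
#include <stdio.h>

#define INODES_PER_CHUNK	64	/* stand-in for XFS_INODES_PER_CHUNK */
#define INODES_PER_CLUSTER	32	/* stand-in for mp->m_inodes_per_cluster */

/* Hoisted helper: check one inode cluster starting at @agino. */
static int check_cluster(unsigned int startino, unsigned int agino)
{
	/* hole-mask, rmap, and free-mask checks would go here */
	printf("checking cluster at agino %u (chunk starts at %u)\n",
			agino, startino);
	return 0;	/* 0 on success, nonzero error code otherwise */
}

/* Outer walk over one inobt record's chunk, now just a thin loop. */
static int check_freemask(unsigned int startino)
{
	unsigned int agino;
	int error = 0;

	for (agino = startino;
	     agino < startino + INODES_PER_CHUNK;
	     agino += INODES_PER_CLUSTER) {
		error = check_cluster(startino, agino);
		if (error)
			break;
	}
	return error;
}

int main(void)
{
	return check_freemask(128);
}

The point of the hoist is simply that the chunk walk and the per-cluster
verification no longer share one function body, which is what the refactoring
in the next patch builds on.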