1739a2fe0SDarrick J. Wong // SPDX-License-Identifier: GPL-2.0-or-later
221fb4cb1SDarrick J. Wong /*
3ecc73f8aSDarrick J. Wong * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
4739a2fe0SDarrick J. Wong * Author: Darrick J. Wong <djwong@kernel.org>
521fb4cb1SDarrick J. Wong */
621fb4cb1SDarrick J. Wong #include "xfs.h"
721fb4cb1SDarrick J. Wong #include "xfs_fs.h"
821fb4cb1SDarrick J. Wong #include "xfs_shared.h"
921fb4cb1SDarrick J. Wong #include "xfs_format.h"
1021fb4cb1SDarrick J. Wong #include "xfs_trans_resv.h"
1121fb4cb1SDarrick J. Wong #include "xfs_mount.h"
1221fb4cb1SDarrick J. Wong #include "xfs_btree.h"
1321fb4cb1SDarrick J. Wong #include "xfs_sb.h"
14ab9d5dc5SDarrick J. Wong #include "xfs_alloc.h"
15a12890aeSDarrick J. Wong #include "xfs_ialloc.h"
16d852657cSDarrick J. Wong #include "xfs_rmap.h"
179bbafc71SDave Chinner #include "xfs_ag.h"
1821fb4cb1SDarrick J. Wong #include "scrub/scrub.h"
1921fb4cb1SDarrick J. Wong #include "scrub/common.h"
2021fb4cb1SDarrick J. Wong
/*
 * Set up scrub to examine one of the AG header blocks.
 *
 * Returns 0 or a negative errno from the common scrub setup path.
 */
int
xchk_setup_agheader(
	struct xfs_scrub	*sc)
{
	/*
	 * If this scrub could collide with in-flight deferred work, turn on
	 * the intent drain fsgate before doing the common filesystem setup.
	 * (Exact drain semantics live in xchk_need_intent_drain /
	 * xchk_fsgates_enable — see scrub/common.c.)
	 */
	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
	return xchk_setup_fs(sc);
}
29466c525dSDarrick J. Wong
3021fb4cb1SDarrick J. Wong /* Superblock */
3121fb4cb1SDarrick J. Wong
/* Cross-reference with the other btrees. */
STATIC void
xchk_superblock_xref(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	int			error;

	/* Skip cross-referencing if the sb is already known to be corrupt. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	/* Attach AG headers and btree cursors for this AG to sc->sa. */
	error = xchk_ag_init_existing(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	/*
	 * The superblock block should be accounted as allocated space, not
	 * part of an inode chunk, owned solely by the filesystem (rmap),
	 * and neither shared nor CoW staging space.
	 */
	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_only_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_xref_is_not_cow_staging(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}
60166d7641SDarrick J. Wong
6121fb4cb1SDarrick J. Wong /*
6221fb4cb1SDarrick J. Wong * Scrub the filesystem superblock.
6321fb4cb1SDarrick J. Wong *
6421fb4cb1SDarrick J. Wong * Note: We do /not/ attempt to check AG 0's superblock. Mount is
6521fb4cb1SDarrick J. Wong * responsible for validating all the geometry information in sb 0, so
6621fb4cb1SDarrick J. Wong * if the filesystem is capable of initiating online scrub, then clearly
6721fb4cb1SDarrick J. Wong * sb 0 is ok and we can use its information to check everything else.
6821fb4cb1SDarrick J. Wong */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;	/* on-disk secondary superblock */
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	/* AG 0's superblock was validated at mount time; nothing to do. */
	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	/*
	 * Grab an active reference to the perag structure.  If we can't get
	 * it, we're racing with something that's tearing down the AG, so
	 * signal that the AG no longer exists.
	 */
	pag = xfs_perag_get(mp, agno);
	if (!pag)
		return -ENOENT;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
		fallthrough;
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		goto out_pag;

	sb = bp->b_addr;

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.  "Corrupt" means a real mismatch; "preen" means the
	 * secondary sb is merely stale and could be rewritten.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frexents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		/* MOREBITS is clear, so features2 must be entirely unused. */
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (xfs_sb_is_v5(&mp->m_sb))
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

		/* sb_bad_features2 mirrors sb_features2; mismatch is stale. */
		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_preen(sc, bp);

	if (!xfs_has_crc(mp)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* compat features must match */
		if (sb->sb_features_compat !=
		    cpu_to_be32(mp->m_sb.sb_features_compat))
			xchk_block_set_corrupt(sc, bp);

		/* ro compat features must match */
		if (sb->sb_features_ro_compat !=
		    cpu_to_be32(mp->m_sb.sb_features_ro_compat))
			xchk_block_set_corrupt(sc, bp);

		/*
		 * NEEDSREPAIR is ignored on a secondary super, so we should
		 * clear it when we find it, though it's not a corruption.
		 */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR);
		if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^
		     sb->sb_features_incompat) & features_mask)
			xchk_block_set_preen(sc, bp);

		/* all other incompat features must match */
		if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^
		     sb->sb_features_incompat) & ~features_mask)
			xchk_block_set_corrupt(sc, bp);

		/*
		 * log incompat features protect newer log record types from
		 * older log recovery code.  Log recovery doesn't check the
		 * secondary supers, so we can clear these if needed.
		 */
		if (sb->sb_features_log_incompat)
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_has_metauuid(mp)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);
out_pag:
	xfs_perag_put(pag);
	return error;
}
362ab9d5dc5SDarrick J. Wong
363ab9d5dc5SDarrick J. Wong /* AGF */
364ab9d5dc5SDarrick J. Wong
36552dc4b44SDarrick J. Wong /* Tally freespace record lengths. */
36652dc4b44SDarrick J. Wong STATIC int
xchk_agf_record_bno_lengths(struct xfs_btree_cur * cur,const struct xfs_alloc_rec_incore * rec,void * priv)367c517b3aaSDarrick J. Wong xchk_agf_record_bno_lengths(
36852dc4b44SDarrick J. Wong struct xfs_btree_cur *cur,
369159eb69dSDarrick J. Wong const struct xfs_alloc_rec_incore *rec,
37052dc4b44SDarrick J. Wong void *priv)
37152dc4b44SDarrick J. Wong {
37252dc4b44SDarrick J. Wong xfs_extlen_t *blocks = priv;
37352dc4b44SDarrick J. Wong
37452dc4b44SDarrick J. Wong (*blocks) += rec->ar_blockcount;
37552dc4b44SDarrick J. Wong return 0;
37652dc4b44SDarrick J. Wong }
37752dc4b44SDarrick J. Wong
/* Check agf_freeblks */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_extlen_t		blocks = 0;
	int			error;

	/* No bnobt cursor means we can't cross-reference; bail quietly. */
	if (!sc->sa.bno_cur)
		return;

	/* Sum the lengths of every bnobt record... */
	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	/* ...which must equal the AGF's free space counter. */
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
39752dc4b44SDarrick J. Wong
/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		agbno;
	xfs_extlen_t		blocks;
	int			have;
	int			error;

	/* No cntbt cursor means we can't cross-reference; bail quietly. */
	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		/* Empty cntbt, so the AGF must claim zero free blocks. */
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/*
	 * Check agf_longest: the lookup_le at maximum length above should
	 * have positioned the cursor at the largest free extent, whose
	 * length must match the AGF's longest-extent field.
	 */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
429e1134b12SDarrick J. Wong
/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/* agf_btreeblks didn't exist before lazysbcount */
	if (!xfs_has_lazysbcount(sc->mp))
		return;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		/* agf_btreeblks excludes one block per tree (the root). */
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_has_rmapbt(mp) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks: add the non-root blocks of the bnobt... */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	/* ...and the non-root blocks of the cntbt. */
	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
479d852657cSDarrick J. Wong
/* Check agf_refcount_blocks against tree size */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error;

	/* No refcountbt cursor means we can't cross-reference. */
	if (!sc->sa.refc_cur)
		return;

	/* The walked refcountbt block count must match the AGF field. */
	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
498f6d5fc21SDarrick J. Wong
/* Cross-reference with the other btrees. */
STATIC void
xchk_agf_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	/* Skip cross-referencing if the AGF is already known to be bad. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	/* Set up the per-AG btree cursors in sc->sa for the helpers below. */
	xchk_ag_btcur_init(sc, &sc->sa);

	/*
	 * The AGF block itself must be allocated, not in an inode chunk,
	 * owned only by the filesystem, and neither shared nor CoW staging;
	 * the counters it holds must agree with the free space, rmap, and
	 * refcount btrees.
	 */
	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_only_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_xref_is_not_cow_staging(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}
526166d7641SDarrick J. Wong
527ab9d5dc5SDarrick J. Wong /* Scrub the AGF. */
528ab9d5dc5SDarrick J. Wong int
xchk_agf(struct xfs_scrub * sc)529c517b3aaSDarrick J. Wong xchk_agf(
5301d8a748aSDarrick J. Wong struct xfs_scrub *sc)
531ab9d5dc5SDarrick J. Wong {
532ab9d5dc5SDarrick J. Wong struct xfs_mount *mp = sc->mp;
533ab9d5dc5SDarrick J. Wong struct xfs_agf *agf;
53447cd97b5SDarrick J. Wong struct xfs_perag *pag;
535de9d2a78SDarrick J. Wong xfs_agnumber_t agno = sc->sm->sm_agno;
536ab9d5dc5SDarrick J. Wong xfs_agblock_t agbno;
537ab9d5dc5SDarrick J. Wong xfs_agblock_t eoag;
538ab9d5dc5SDarrick J. Wong xfs_agblock_t agfl_first;
539ab9d5dc5SDarrick J. Wong xfs_agblock_t agfl_last;
540ab9d5dc5SDarrick J. Wong xfs_agblock_t agfl_count;
541ab9d5dc5SDarrick J. Wong xfs_agblock_t fl_count;
542ab9d5dc5SDarrick J. Wong int level;
543ab9d5dc5SDarrick J. Wong int error = 0;
544ab9d5dc5SDarrick J. Wong
545de9d2a78SDarrick J. Wong error = xchk_ag_read_headers(sc, agno, &sc->sa);
546c517b3aaSDarrick J. Wong if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
547ab9d5dc5SDarrick J. Wong goto out;
548c517b3aaSDarrick J. Wong xchk_buffer_recheck(sc, sc->sa.agf_bp);
549ab9d5dc5SDarrick J. Wong
5509798f615SChristoph Hellwig agf = sc->sa.agf_bp->b_addr;
55148c6615cSDarrick J. Wong pag = sc->sa.pag;
552ab9d5dc5SDarrick J. Wong
553ab9d5dc5SDarrick J. Wong /* Check the AG length */
554ab9d5dc5SDarrick J. Wong eoag = be32_to_cpu(agf->agf_length);
5550800169eSDave Chinner if (eoag != pag->block_count)
556c517b3aaSDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
557ab9d5dc5SDarrick J. Wong
558ab9d5dc5SDarrick J. Wong /* Check the AGF btree roots and levels */
559ab9d5dc5SDarrick J. Wong agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
5600800169eSDave Chinner if (!xfs_verify_agbno(pag, agbno))
561c517b3aaSDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
562ab9d5dc5SDarrick J. Wong
563ab9d5dc5SDarrick J. Wong agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
5640800169eSDave Chinner if (!xfs_verify_agbno(pag, agbno))
565c517b3aaSDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
566ab9d5dc5SDarrick J. Wong
567ab9d5dc5SDarrick J. Wong level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
5687cb3efb4SDarrick J. Wong if (level <= 0 || level > mp->m_alloc_maxlevels)
569c517b3aaSDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
570ab9d5dc5SDarrick J. Wong
571ab9d5dc5SDarrick J. Wong level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
5727cb3efb4SDarrick J. Wong if (level <= 0 || level > mp->m_alloc_maxlevels)
573c517b3aaSDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
574ab9d5dc5SDarrick J. Wong
57538c26bfdSDave Chinner if (xfs_has_rmapbt(mp)) {
576ab9d5dc5SDarrick J. Wong agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
5770800169eSDave Chinner if (!xfs_verify_agbno(pag, agbno))
578c517b3aaSDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
579ab9d5dc5SDarrick J. Wong
580ab9d5dc5SDarrick J. Wong level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
581f4585e82SDarrick J. Wong if (level <= 0 || level > mp->m_rmap_maxlevels)
582c517b3aaSDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
583ab9d5dc5SDarrick J. Wong }
584ab9d5dc5SDarrick J. Wong
58538c26bfdSDave Chinner if (xfs_has_reflink(mp)) {
586ab9d5dc5SDarrick J. Wong agbno = be32_to_cpu(agf->agf_refcount_root);
5870800169eSDave Chinner if (!xfs_verify_agbno(pag, agbno))
588c517b3aaSDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
589ab9d5dc5SDarrick J. Wong
590ab9d5dc5SDarrick J. Wong level = be32_to_cpu(agf->agf_refcount_level);
591f4585e82SDarrick J. Wong if (level <= 0 || level > mp->m_refc_maxlevels)
592c517b3aaSDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
593ab9d5dc5SDarrick J. Wong }
594ab9d5dc5SDarrick J. Wong
595ab9d5dc5SDarrick J. Wong /* Check the AGFL counters */
596ab9d5dc5SDarrick J. Wong agfl_first = be32_to_cpu(agf->agf_flfirst);
597ab9d5dc5SDarrick J. Wong agfl_last = be32_to_cpu(agf->agf_fllast);
598ab9d5dc5SDarrick J. Wong agfl_count = be32_to_cpu(agf->agf_flcount);
599ab9d5dc5SDarrick J. Wong if (agfl_last > agfl_first)
600ab9d5dc5SDarrick J. Wong fl_count = agfl_last - agfl_first + 1;
601ab9d5dc5SDarrick J. Wong else
602a78ee256SDave Chinner fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
603ab9d5dc5SDarrick J. Wong if (agfl_count != 0 && fl_count != agfl_count)
604c517b3aaSDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
605ab9d5dc5SDarrick J. Wong
60647cd97b5SDarrick J. Wong /* Do the incore counters match? */
60747cd97b5SDarrick J. Wong if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks))
60847cd97b5SDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
60947cd97b5SDarrick J. Wong if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount))
61047cd97b5SDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
611ebd9027dSDave Chinner if (xfs_has_lazysbcount(sc->mp) &&
612e6c01077SDarrick J. Wong pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks))
61347cd97b5SDarrick J. Wong xchk_block_set_corrupt(sc, sc->sa.agf_bp);
61447cd97b5SDarrick J. Wong
615c517b3aaSDarrick J. Wong xchk_agf_xref(sc);
616ab9d5dc5SDarrick J. Wong out:
617ab9d5dc5SDarrick J. Wong return error;
618ab9d5dc5SDarrick J. Wong }
619ab9d5dc5SDarrick J. Wong
620ab9d5dc5SDarrick J. Wong /* AGFL */
621ab9d5dc5SDarrick J. Wong
/*
 * In-memory state for checking the AGFL: the AGFL block numbers are
 * collected into @entries by the AGFL walk so they can be sorted and
 * checked for duplicates afterwards.
 */
struct xchk_agfl_info {
	/* Number of AGFL entries that the AGF claims are in use. */
	unsigned int		agflcount;

	/* Number of AGFL entries that we found. */
	unsigned int		nr_entries;

	/* Buffer to hold AGFL entries for extent checking. */
	xfs_agblock_t		*entries;

	/* Buffer holding the ondisk AGFL blocks. */
	struct xfs_buf		*agfl_bp;

	/* Scrub context that this walk is running under. */
	struct xfs_scrub	*sc;
};
635d44b47fdSDarrick J. Wong
/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	/* Skip the cross-referencing once corruption has been flagged. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	/*
	 * An AGFL block must be allocated space, must not be part of an
	 * inode chunk, must be owned only by the AG (rmap), and must not
	 * be shared or CoW staging space (reflink).
	 */
	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_only_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_xref_is_not_cow_staging(sc, agbno, 1);
}
651166d7641SDarrick J. Wong
652ab9d5dc5SDarrick J. Wong /* Scrub an AGFL block. */
653ab9d5dc5SDarrick J. Wong STATIC int
xchk_agfl_block(struct xfs_mount * mp,xfs_agblock_t agbno,void * priv)654c517b3aaSDarrick J. Wong xchk_agfl_block(
6559f3a080eSDarrick J. Wong struct xfs_mount *mp,
656ab9d5dc5SDarrick J. Wong xfs_agblock_t agbno,
657ab9d5dc5SDarrick J. Wong void *priv)
658ab9d5dc5SDarrick J. Wong {
659c517b3aaSDarrick J. Wong struct xchk_agfl_info *sai = priv;
6601d8a748aSDarrick J. Wong struct xfs_scrub *sc = sai->sc;
661ab9d5dc5SDarrick J. Wong
6620800169eSDave Chinner if (xfs_verify_agbno(sc->sa.pag, agbno) &&
663be1317fdSDarrick J. Wong sai->nr_entries < sai->agflcount)
664d44b47fdSDarrick J. Wong sai->entries[sai->nr_entries++] = agbno;
665d44b47fdSDarrick J. Wong else
666be1317fdSDarrick J. Wong xchk_block_set_corrupt(sc, sai->agfl_bp);
667ab9d5dc5SDarrick J. Wong
6687280fedaSDarrick J. Wong xchk_agfl_block_xref(sc, agbno);
669166d7641SDarrick J. Wong
6709f3a080eSDarrick J. Wong if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
671e7ee96dfSDarrick J. Wong return -ECANCELED;
6729f3a080eSDarrick J. Wong
673ab9d5dc5SDarrick J. Wong return 0;
674ab9d5dc5SDarrick J. Wong }
675ab9d5dc5SDarrick J. Wong
676d44b47fdSDarrick J. Wong static int
xchk_agblock_cmp(const void * pa,const void * pb)677c517b3aaSDarrick J. Wong xchk_agblock_cmp(
678d44b47fdSDarrick J. Wong const void *pa,
679d44b47fdSDarrick J. Wong const void *pb)
680d44b47fdSDarrick J. Wong {
681d44b47fdSDarrick J. Wong const xfs_agblock_t *a = pa;
682d44b47fdSDarrick J. Wong const xfs_agblock_t *b = pb;
683d44b47fdSDarrick J. Wong
684d44b47fdSDarrick J. Wong return (int)*a - (int)*b;
685d44b47fdSDarrick J. Wong }
686d44b47fdSDarrick J. Wong
687166d7641SDarrick J. Wong /* Cross-reference with the other btrees. */
688166d7641SDarrick J. Wong STATIC void
xchk_agfl_xref(struct xfs_scrub * sc)689c517b3aaSDarrick J. Wong xchk_agfl_xref(
6901d8a748aSDarrick J. Wong struct xfs_scrub *sc)
691166d7641SDarrick J. Wong {
69252dc4b44SDarrick J. Wong struct xfs_mount *mp = sc->mp;
69352dc4b44SDarrick J. Wong xfs_agblock_t agbno;
69452dc4b44SDarrick J. Wong
695166d7641SDarrick J. Wong if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
696166d7641SDarrick J. Wong return;
69752dc4b44SDarrick J. Wong
69852dc4b44SDarrick J. Wong agbno = XFS_AGFL_BLOCK(mp);
69952dc4b44SDarrick J. Wong
700f53acfacSDarrick J. Wong xchk_ag_btcur_init(sc, &sc->sa);
70152dc4b44SDarrick J. Wong
702c517b3aaSDarrick J. Wong xchk_xref_is_used_space(sc, agbno, 1);
703c517b3aaSDarrick J. Wong xchk_xref_is_not_inode_chunk(sc, agbno, 1);
704*69115f77SDarrick J. Wong xchk_xref_is_only_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
705c517b3aaSDarrick J. Wong xchk_xref_is_not_shared(sc, agbno, 1);
7067ac14fa2SDarrick J. Wong xchk_xref_is_not_cow_staging(sc, agbno, 1);
70752dc4b44SDarrick J. Wong
70852dc4b44SDarrick J. Wong /*
70952dc4b44SDarrick J. Wong * Scrub teardown will take care of sc->sa for us. Leave sc->sa
71052dc4b44SDarrick J. Wong * active so that the agfl block xref can use it too.
71152dc4b44SDarrick J. Wong */
712166d7641SDarrick J. Wong }
713166d7641SDarrick J. Wong
/*
 * Scrub the AGFL: verify the header buffers, cross-reference the AGFL
 * header block, walk every active AGFL slot to validate and collect the
 * block numbers, then sort them to detect duplicate entries.
 */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai = {
		.sc			= sc,
	};
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	unsigned int		i;
	int			error;

	/* Lock the AGF and AGI so that nobody can touch this AG. */
	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		return error;
	/* No AGF buffer means we cannot locate or trust the AGFL. */
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;

	/* Try to read the AGFL, and verify its structure if we get it. */
	error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &sai.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		return error;
	xchk_buffer_recheck(sc, sai.agfl_bp);

	/* Cross-reference the AGFL header block against the other btrees. */
	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = sc->sa.agf_bp->b_addr;
	sai.agflcount = be32_to_cpu(agf->agf_flcount);
	/* The AGF cannot claim more entries than the AGFL can hold. */
	if (sai.agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	sai.entries = kvcalloc(sai.agflcount, sizeof(xfs_agblock_t),
			XCHK_GFP_FLAGS);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr, sai.agfl_bp,
			xchk_agfl_block, &sai);
	/* -ECANCELED means the walk stopped early on corruption, not error. */
	if (error == -ECANCELED) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	/* The walk must have found exactly as many entries as the AGF said. */
	if (sai.agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kvfree(sai.entries);
out:
	return error;
}
789a12890aeSDarrick J. Wong
790a12890aeSDarrick J. Wong /* AGI */
791a12890aeSDarrick J. Wong
/* Check agi_count/agi_freecount against what the inobt actually holds. */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	/* Without an inobt cursor there is nothing to cross-reference. */
	if (!sc->sa.ino_cur)
		return;

	/* Count all inodes and free inodes recorded in the inobt. */
	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	/* Ondisk AGI counters must agree with the btree contents. */
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}
8122e6f2756SDarrick J. Wong
/* Check agi_[fi]blocks against tree size */
static inline void
xchk_agi_xref_fiblocks(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error = 0;

	/* The AGI btree block counters only exist with inobtcounts. */
	if (!xfs_has_inobtcounts(sc->mp))
		return;

	if (sc->sa.ino_cur) {
		/* agi_iblocks must match the actual inobt block count. */
		error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_iblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (sc->sa.fino_cur) {
		/* agi_fblocks must match the actual finobt block count. */
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_fblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}
}
8411dbbff02SDarrick J. Wong
842166d7641SDarrick J. Wong /* Cross-reference with the other btrees. */
843166d7641SDarrick J. Wong STATIC void
xchk_agi_xref(struct xfs_scrub * sc)844c517b3aaSDarrick J. Wong xchk_agi_xref(
8451d8a748aSDarrick J. Wong struct xfs_scrub *sc)
846166d7641SDarrick J. Wong {
84752dc4b44SDarrick J. Wong struct xfs_mount *mp = sc->mp;
84852dc4b44SDarrick J. Wong xfs_agblock_t agbno;
84952dc4b44SDarrick J. Wong
850166d7641SDarrick J. Wong if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
851166d7641SDarrick J. Wong return;
85252dc4b44SDarrick J. Wong
85352dc4b44SDarrick J. Wong agbno = XFS_AGI_BLOCK(mp);
85452dc4b44SDarrick J. Wong
855f53acfacSDarrick J. Wong xchk_ag_btcur_init(sc, &sc->sa);
85652dc4b44SDarrick J. Wong
857c517b3aaSDarrick J. Wong xchk_xref_is_used_space(sc, agbno, 1);
858c517b3aaSDarrick J. Wong xchk_xref_is_not_inode_chunk(sc, agbno, 1);
859c517b3aaSDarrick J. Wong xchk_agi_xref_icounts(sc);
860*69115f77SDarrick J. Wong xchk_xref_is_only_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
861c517b3aaSDarrick J. Wong xchk_xref_is_not_shared(sc, agbno, 1);
8627ac14fa2SDarrick J. Wong xchk_xref_is_not_cow_staging(sc, agbno, 1);
8631dbbff02SDarrick J. Wong xchk_agi_xref_fiblocks(sc);
86452dc4b44SDarrick J. Wong
86552dc4b44SDarrick J. Wong /* scrub teardown will take care of sc->sa for us */
866166d7641SDarrick J. Wong }
867166d7641SDarrick J. Wong
/*
 * Scrub the AGI: validate the ondisk AG length, inode btree roots and
 * levels, inode counters, inode pointers, and unlinked buckets, then
 * compare the ondisk counters with the incore perag state and
 * cross-reference against the other btrees.
 */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	struct xfs_ino_geometry	*igeo = M_IGEO(sc->mp);
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	/* Lock and read the AG headers; bail if the AGI can't be read. */
	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = sc->sa.agi_bp->b_addr;
	pag = sc->sa.pag;

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != pag->block_count)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(pag, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > igeo->inobt_maxlevels)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* The free inode btree fields are only valid with the finobt feature. */
	if (xfs_has_finobt(mp)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(pag, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > igeo->inobt_maxlevels)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	/* icount cannot exceed the AG's inode range or undercut freecount. */
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (!xfs_verify_agino_or_null(pag, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (!xfs_verify_agino_or_null(pag, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (!xfs_verify_agino_or_null(pag, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Padding must be zeroed on disk. */
	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Do the incore counters match? */
	if (pag->pagi_count != be32_to_cpu(agi->agi_count))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Cross-reference the AGI block against the other btrees. */
	xchk_agi_xref(sc);
out:
	return error;
}
956