// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

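/*
 * Set up scrubbing of an AG header block.  If this scrubber is one that
 * must wait out concurrent chains of deferred ops (xchk_need_intent_drain),
 * enable the drain fsgate before the common filesystem setup.
 */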
int
xchk_setup_agheader(
	struct xfs_scrub	*sc)
{
	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
	return xchk_setup_fs(sc);
}

/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xchk_superblock_xref(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xchk_ag_init_existing(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_only_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_xref_is_not_cow_staging(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	/*
	 * Grab an active reference to the perag structure.  If we can't get
	 * it, we're racing with something that's tearing down the AG, so
	 * signal that the AG no longer exists.
	 */
	pag = xfs_perag_get(mp, agno);
	if (!pag)
		return -ENOENT;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
		fallthrough;
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		goto out_pag;

	sb = bp->b_addr;

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
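	/*
	 * Note that these mask comparisons are done in big-endian (on-disk)
	 * byte order: building the mask with cpu_to_be16() once lets us
	 * compare the raw on-disk field against the byteswapped incore
	 * value without converting each field individually.
	 */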
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (xfs_sb_is_v5(&mp->m_sb))
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

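		/*
		 * sb_bad_features2 mirrors sb_features2; it exists because
		 * an old padding bug in struct xfs_sb could leave the
		 * features2 field at a different offset, so the two copies
		 * are expected to stay in sync.
		 */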
		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_preen(sc, bp);

	if (!xfs_has_crc(mp)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* compat features must match */
		if (sb->sb_features_compat !=
				cpu_to_be32(mp->m_sb.sb_features_compat))
			xchk_block_set_corrupt(sc, bp);

		/* ro compat features must match */
		if (sb->sb_features_ro_compat !=
				cpu_to_be32(mp->m_sb.sb_features_ro_compat))
			xchk_block_set_corrupt(sc, bp);

		/*
		 * NEEDSREPAIR is ignored on a secondary super, so we should
		 * clear it when we find it, though it's not a corruption.
		 */
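		/*
		 * (x ^ y) & mask is nonzero iff x and y disagree in any bit
		 * selected by mask, so the next two checks split the
		 * incompat flags into the NEEDSREPAIR bit (preen) and
		 * everything else (corruption).
		 */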
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR);
		if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^
				sb->sb_features_incompat) & features_mask)
			xchk_block_set_preen(sc, bp);

		/* all other incompat features must match */
		if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^
				sb->sb_features_incompat) & ~features_mask)
			xchk_block_set_corrupt(sc, bp);

		/*
		 * log incompat features protect newer log record types from
		 * older log recovery code.  Log recovery doesn't check the
		 * secondary supers, so we can clear these if needed.
		 */
		if (sb->sb_features_log_incompat)
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_has_metauuid(mp)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
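	/*
	 * sb + 1 points just past the end of struct xfs_dsb, and b_length
	 * is measured in 512-byte basic blocks, so BBTOB() converts it to
	 * bytes before subtracting the structure size to get the length of
	 * the sector tail being scanned.
	 */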
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);
out_pag:
	xfs_perag_put(pag);
	return error;
}

/* AGF */

/* Tally freespace record lengths. */
STATIC int
xchk_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	const struct xfs_alloc_rec_incore *rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Check agf_freeblks */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_extlen_t		blocks = 0;
	int			error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference the AGF with the cntbt (freespace by length btree) */
static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		agbno;
	xfs_extlen_t		blocks;
	int			have;
	int			error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
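	/*
	 * A <= lookup for the maximum possible extent length (-1U) lands
	 * the cursor on the record with the largest length in the by-size
	 * btree; if even that finds nothing, the AG has no free space.
	 */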
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/* agf_btreeblks didn't exist before lazysbcount */
	if (!xfs_has_lazysbcount(sc->mp))
		return;

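	/*
	 * agf_btreeblks counts the non-root blocks of the free space and
	 * rmap btrees, so one block (the root) is subtracted from each
	 * per-btree block count accumulated below.
	 */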
	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_has_rmapbt(mp) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check agf_refcount_blocks against tree size */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agf_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_only_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_xref_is_not_cow_staging(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = sc->sa.agf_bp->b_addr;
	pag = sc->sa.pag;

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != pag->block_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(pag, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(pag, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > mp->m_alloc_maxlevels)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > mp->m_alloc_maxlevels)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_has_rmapbt(mp)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(pag, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > mp->m_rmap_maxlevels)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_has_reflink(mp)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(pag, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > mp->m_refc_maxlevels)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
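	/*
	 * The AGFL is a circular buffer, so the active region can wrap past
	 * the end of the array.  For example, if xfs_agfl_size() were 118
	 * with flfirst == 116 and fllast == 1, the list would hold entries
	 * 116, 117, 0, and 1: 118 - 116 + 1 + 1 == 4 blocks.  An empty list
	 * (flcount == 0) is exempted below because the wraparound formula
	 * can never produce zero.
	 */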
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Do the incore counters match? */
	if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (xfs_has_lazysbcount(sc->mp) &&
	    pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	xchk_agf_xref(sc);
out:
	return error;
}

/* AGFL */

struct xchk_agfl_info {
	/* Number of AGFL entries that the AGF claims are in use. */
	unsigned int		agflcount;

	/* Number of AGFL entries that we found. */
	unsigned int		nr_entries;

	/* Buffer to hold AGFL entries for extent checking. */
	xfs_agblock_t		*entries;

	struct xfs_buf		*agfl_bp;
	struct xfs_scrub	*sc;
};

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_only_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_xref_is_not_cow_staging(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xchk_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xchk_agfl_info	*sai = priv;
	struct xfs_scrub	*sc = sai->sc;

	if (xfs_verify_agbno(sc->sa.pag, agbno) &&
	    sai->nr_entries < sai->agflcount)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xchk_block_set_corrupt(sc, sai->agfl_bp);

	xchk_agfl_block_xref(sc, agbno);

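	/* Returning -ECANCELED aborts the AGFL walk early on corruption. */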
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}

static int
xchk_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_only_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_xref_is_not_cow_staging(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}

/* Scrub the AGFL. */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai = {
		.sc		= sc,
	};
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	unsigned int		i;
	int			error;

	/* Lock the AGF and AGI so that nobody can touch this AG. */
	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		return error;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;

	/* Try to read the AGFL, and verify its structure if we get it. */
	error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &sai.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		return error;
	xchk_buffer_recheck(sc, sai.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = sc->sa.agf_bp->b_addr;
	sai.agflcount = be32_to_cpu(agf->agf_flcount);
	if (sai.agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	sai.entries = kvcalloc(sai.agflcount, sizeof(xfs_agblock_t),
			       XCHK_GFP_FLAGS);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr, sai.agfl_bp,
			xchk_agfl_block, &sai);
	if (error == -ECANCELED) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	if (sai.agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
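	/*
	 * After sorting, any duplicate AGFL entries must be adjacent, so a
	 * single linear scan comparing each entry to its neighbor suffices
	 * to detect them.
	 */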
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kvfree(sai.entries);
out:
	return error;
}

/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}

/* Check agi_[fi]blocks against tree size */
static inline void
xchk_agi_xref_fiblocks(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error = 0;

	if (!xfs_has_inobtcounts(sc->mp))
		return;

	if (sc->sa.ino_cur) {
		error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_iblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_fblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agi_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_agi_xref_icounts(sc);
	xchk_xref_is_only_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_xref_is_not_cow_staging(sc, agbno, 1);
	xchk_agi_xref_fiblocks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	struct xfs_ino_geometry	*igeo = M_IGEO(sc->mp);
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = sc->sa.agi_bp->b_addr;
	pag = sc->sa.pag;

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != pag->block_count)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(pag, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > igeo->inobt_maxlevels)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_has_finobt(mp)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(pag, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > igeo->inobt_maxlevels)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
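	/*
	 * The AG cannot hold more inodes than it has inode numbers
	 * (last_agino - first_agino + 1), and the free count can never
	 * exceed the allocated count.
	 */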
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (!xfs_verify_agino_or_null(pag, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (!xfs_verify_agino_or_null(pag, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (!xfs_verify_agino_or_null(pag, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Do the incore counters match? */
	if (pag->pagi_count != be32_to_cpu(agi->agi_count))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	xchk_agi_xref(sc);
out:
	return error;
}
956