// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

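/*
 * This file scrubs the four per-AG header structures: the superblock
 * (SB), the free space header (AGF), the free list (AGFL), and the
 * inode header (AGI).  Each scrubber compares the on-disk header
 * against the in-core mount geometry and then cross-references it
 * with the AG btrees.
 *
 * These scrubbers are reached from userspace through the
 * XFS_IOC_SCRUB_METADATA ioctl.  An illustrative (untested) sketch of
 * an invocation for the AGF of AG 3, using the UAPI names from
 * xfs_fs.h, might look like this; report_corruption() is a stand-in
 * for whatever the caller does with the result:
 *
 *	struct xfs_scrub_metadata sm = {
 *		.sm_type = XFS_SCRUB_TYPE_AGF,
 *		.sm_agno = 3,
 *	};
 *	error = ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm);
 *	if (!error && (sm.sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
 *		report_corruption();
 */
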
/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xchk_superblock_xref(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xchk_ag_init(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = XFS_BUF_TO_SBP(bp);

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
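	/*
	 * (Here "preen" means the field is merely stale relative to the
	 * primary super and can be freshened by rewriting this copy,
	 * whereas "corrupt" means the mismatch indicates real damage.)
	 */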
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
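	/*
	 * (~XFS_SB_VERSION_OKBITS is folded into the mask so that any
	 * version bit we don't recognize must also match the primary.)
	 */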
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (sb->sb_features2 & cpu_to_be32(~v2_ok))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}
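	/*
	 * (sb_bad_features2 is a mirror of sb_features2 that exists to
	 * work around an old struct alignment bug that made the on-disk
	 * location of sb_features2 ambiguous; the two copies drifting
	 * apart is preenable, not corruption.)
	 */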

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);

	return error;
}

/* AGF */

/* Tally freespace record lengths. */
STATIC int
xchk_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Check agf_freeblks */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_extlen_t		blocks = 0;
	int			error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t		agbno;
	xfs_extlen_t		blocks;
	int			have;
	int			error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

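	/*
	 * The cntbt is keyed by (length, startblock), so the _le lookup
	 * for the maximum possible length above parked the cursor on the
	 * largest record in the tree; its length must match agf_longest.
	 */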
	/* Check agf_longest */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}
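	/*
	 * (agf_rmap_blocks counts every block in the rmapbt, root
	 * included, which is why it is compared against the full count
	 * above; agf_btreeblks excludes each btree's root block, hence
	 * the "- 1" adjustments here and below.)
	 */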

	/*
	 * If the rmapbt feature is enabled but we couldn't set up the
	 * rmap cursor, we can't cross-reference.  The same is true if
	 * we're missing the free space btree cursors.
	 */
	if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check agf_refcount_blocks against tree size */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t		blocks;
	int			error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agf_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	error = xchk_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
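	/*
	 * Worked example (made-up numbers): with an AGFL of 118 slots,
	 * agf_flfirst = 116 and agf_fllast = 1 describe a list that has
	 * wrapped around, occupying slots 116, 117, 0, and 1, so the
	 * else branch computes fl_count = 118 - 116 + 1 + 1 = 4.
	 */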
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	xchk_agf_xref(sc);
out:
	return error;
}

/* AGFL */

struct xchk_agfl_info {
	unsigned int		sz_entries;	/* capacity of the array */
	unsigned int		nr_entries;	/* entries collected so far */
	xfs_agblock_t		*entries;	/* AGFL blocks, for dup checks */
	struct xfs_scrub	*sc;
};

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xchk_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xchk_agfl_info	*sai = priv;
	struct xfs_scrub	*sc = sai->sc;
	xfs_agnumber_t		agno = sc->sa.agno;

	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xchk_block_set_corrupt(sc, sc->sa.agfl_bp);

	xchk_agfl_block_xref(sc, agbno);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return XFS_BTREE_QUERY_RANGE_ABORT;

	return 0;
}

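/*
 * Comparator for sort().  The int casts are well-defined: AG block
 * numbers never exceed INT_MAX (an AG is capped at 1TB), so the
 * subtraction of two such values cannot overflow.
 */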
static int
xchk_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	error = xchk_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}

/* Scrub the AGFL. */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai;
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno;
	unsigned int		agflcount;
	unsigned int		i;
	int			error;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xchk_buffer_recheck(sc, sc->sa.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sc = sc;
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
			KM_MAYFAIL);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
			sc->sa.agfl_bp, xchk_agfl_block, &sai);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}

/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agi_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	error = xchk_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_agi_xref_icounts(sc);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (agino == NULLAGINO)
			continue;
		if (!xfs_verify_agino(mp, agno, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	xchk_agi_xref(sc);
out:
	return error;
}