/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
20 #include "xfs.h"
21 #include "xfs_fs.h"
22 #include "xfs_shared.h"
23 #include "xfs_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_mount.h"
26 #include "xfs_defer.h"
27 #include "xfs_btree.h"
28 #include "xfs_bit.h"
29 #include "xfs_log_format.h"
30 #include "xfs_trans.h"
31 #include "xfs_sb.h"
32 #include "xfs_inode.h"
33 #include "xfs_alloc.h"
34 #include "xfs_ialloc.h"
35 #include "xfs_rmap.h"
36 #include "scrub/xfs_scrub.h"
37 #include "scrub/scrub.h"
38 #include "scrub/common.h"
39 #include "scrub/trace.h"
40 
/*
 * Walk all the blocks in the AGFL.  The fn callback can return any negative
 * error code or XFS_BTREE_QUERY_RANGE_ABORT to stop the walk early.
 */
int
xfs_scrub_walk_agfl(
	struct xfs_scrub_context	*sc,
	int				(*fn)(struct xfs_scrub_context *,
					      xfs_agblock_t bno, void *),
	void				*priv)
{
	struct xfs_agf			*agf;
	__be32				*agfl_bno;
	struct xfs_mount		*mp = sc->mp;
	unsigned int			flfirst;
	unsigned int			fllast;
	int				i;
	int				error;

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
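	/*
	 * Find the array of AGFL block numbers within the AGFL block; on v5
	 * (CRC-enabled) filesystems the array follows a struct xfs_agfl
	 * header, which XFS_BUF_TO_AGFL_BNO skips for us.
	 */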
	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, sc->sa.agfl_bp);
	flfirst = be32_to_cpu(agf->agf_flfirst);
	fllast = be32_to_cpu(agf->agf_fllast);

	/* Nothing to walk in an empty AGFL. */
	if (agf->agf_flcount == cpu_to_be32(0))
		return 0;

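	/*
	 * The AGFL is a circular array: agf_flfirst and agf_fllast are
	 * indices into it.  If fllast has wrapped around below flfirst,
	 * walk the list in two runs: flfirst through the end of the array,
	 * then the start of the array through fllast.
	 */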
	/* first to last is a consecutive list. */
	if (fllast >= flfirst) {
		for (i = flfirst; i <= fllast; i++) {
			error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
			if (error)
				return error;
			if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
				return error;
		}

		return 0;
	}

	/* first to the end */
	for (i = flfirst; i < XFS_AGFL_SIZE(mp); i++) {
		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			return error;
	}

	/* the start to last. */
	for (i = 0; i <= fllast; i++) {
		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			return error;
	}

	return 0;
}

/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_superblock_xref(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agnumber_t			agno = sc->sm->sm_agno;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

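	/* The superblock lives at the start of each AG. */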
	agbno = XFS_SB_BLOCK(mp);

	error = xfs_scrub_ag_init(sc, agno, &sc->sa);
	if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xfs_scrub_superblock(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*bp;
	struct xfs_dsb			*sb;
	xfs_agnumber_t			agno;
	uint32_t			v2_ok;
	__be32				features_mask;
	int				error;
	__be16				vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
		  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
		  XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_sb_buf_ops);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
		/* fall through */
	default:
		break;
	}
	if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = XFS_BUF_TO_SBP(bp);

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.  A preen flag means the field is merely stale, not
	 * corrupt, and can be freshened by repair.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xfs_scrub_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frexents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xfs_scrub_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xfs_scrub_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (sb->sb_features2 & cpu_to_be32(~v2_ok))
			xfs_scrub_block_set_corrupt(sc, bp);

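		/*
		 * sb_bad_features2 mirrors sb_features2; it exists to work
		 * around an old alignment bug that wrote sb_features2 at
		 * the wrong offset, so the two copies should always match.
		 */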
		if (sb->sb_features2 != sb->sb_bad_features2)
			xfs_scrub_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xfs_scrub_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xfs_scrub_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xfs_scrub_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xfs_scrub_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xfs_scrub_block_set_corrupt(sc, bp);

	xfs_scrub_superblock_xref(sc, bp);

	return error;
}

/* AGF */

/* Tally freespace record lengths. */
STATIC int
xfs_scrub_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Check agf_freeblks */
static inline void
xfs_scrub_agf_xref_freeblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_extlen_t			blocks = 0;
	int				error;

	if (!sc->sa.bno_cur)
		return;

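	/* Sum the lengths of all records in the bnobt. */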
	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xfs_scrub_agf_record_bno_lengths, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xfs_scrub_agf_xref_cntbt(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t			agbno;
	xfs_extlen_t			blocks;
	int				have;
	int				error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest; the lookup left us at the largest extent. */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xfs_scrub_agf_xref_btreeblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			blocks;
	xfs_agblock_t			btreeblks;
	int				error;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
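		/* agf_btreeblks doesn't count btree roots, hence the -1. */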
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * We can't check agf_btreeblks if the rmapbt feature is enabled but
	 * we don't have a rmapbt cursor, or if either free space btree
	 * cursor is missing.
	 */
	if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check agf_refcount_blocks against tree size */
static inline void
xfs_scrub_agf_xref_refcblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t			blocks;
	int				error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agf_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_agf_xref_freeblks(sc);
	xfs_scrub_agf_xref_cntbt(sc);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_agf_xref_btreeblks(sc);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);
	xfs_scrub_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xfs_scrub_agf(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_agf			*agf;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			eoag;
	xfs_agblock_t			agfl_first;
	xfs_agblock_t			agfl_last;
	xfs_agblock_t			agfl_count;
	xfs_agblock_t			fl_count;
	int				level;
	int				error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xfs_scrub_buffer_recheck(sc, sc->sa.agf_bp);

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
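	/*
	 * The AGFL is a circular array, so if the list doesn't wrap, the
	 * entry count is simply last - first + 1; if it does wrap, count
	 * the two pieces on either side of the wrap point.  For example,
	 * with 119 slots, first = 100, and last = 2, there are
	 * (119 - 100) + (2 + 1) = 22 entries.
	 */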
	if (agfl_last >= agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = XFS_AGFL_SIZE(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	xfs_scrub_agf_xref(sc);
out:
	return error;
}

/* AGFL */

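/*
 * In-memory bookkeeping for the AGFL scrub: sz_entries is the expected
 * number of AGFL entries (taken from agf_flcount), entries[] records each
 * block number we walk so that they can be sorted and checked for
 * duplicates, and nr_entries counts how many were actually recorded.
 */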
struct xfs_scrub_agfl_info {
	struct xfs_owner_info		oinfo;
	unsigned int			sz_entries;
	unsigned int			nr_entries;
	xfs_agblock_t			*entries;
};

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agfl_block_xref(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	struct xfs_owner_info		*oinfo)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xfs_scrub_agfl_block(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	void				*priv)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_scrub_agfl_info	*sai = priv;
	xfs_agnumber_t			agno = sc->sa.agno;

	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);

	xfs_scrub_agfl_block_xref(sc, agbno, &sai->oinfo);

	return 0;
}

static int
xfs_scrub_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agfl_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}

/* Scrub the AGFL. */
int
xfs_scrub_agfl(
	struct xfs_scrub_context	*sc)
{
	struct xfs_scrub_agfl_info	sai;
	struct xfs_agf			*agf;
	xfs_agnumber_t			agno;
	unsigned int			agflcount;
	unsigned int			i;
	int				error;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xfs_scrub_buffer_recheck(sc, sc->sa.agfl_bp);

	xfs_scrub_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > XFS_AGFL_SIZE(sc->mp)) {
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sz_entries = agflcount;
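	/*
	 * Use KM_NOFS because we hold AG header buffers locked; memory
	 * reclaim must not recurse back into the filesystem.
	 */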
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
	error = xfs_scrub_walk_agfl(sc, xfs_scrub_agfl_block, &sai);
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xfs_scrub_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}

/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xfs_scrub_agi_xref_icounts(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agi			*agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
	xfs_agino_t			icount;
	xfs_agino_t			freecount;
	int				error;

	if (!sc->sa.ino_cur)
		return;

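	/* Walk all inobt records to tally allocated and free inodes. */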
	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agi_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_scrub_agi_xref_icounts(sc);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xfs_scrub_agi(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_agi			*agi;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			eoag;
	xfs_agino_t			agino;
	xfs_agino_t			first_agino;
	xfs_agino_t			last_agino;
	xfs_agino_t			icount;
	int				i;
	int				level;
	int				error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xfs_scrub_buffer_recheck(sc, sc->sa.agi_bp);

	agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_ialloc_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/*
	 * Check the unlinked inode buckets; each is the head of a list
	 * of unlinked inodes.
	 */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (agino == NULLAGINO)
			continue;
		if (!xfs_verify_agino(mp, agno, agino))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	xfs_scrub_agi_xref(sc);
out:
	return error;
}
968