/* xref: /openbmc/linux/fs/xfs/scrub/bmap.c (revision 2c363576) */
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_inode_fork.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/* Set us up with an inode's bmap. */
int
xfs_scrub_setup_inode_bmap(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip)
{
	struct xfs_mount		*mp = sc->mp;
	int				error;

	error = xfs_scrub_get_inode(sc, ip);
	if (error)
		goto out;

	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);

	/*
	 * We don't want any ephemeral data fork updates sitting around
	 * while we inspect block mappings, so wait for directio to finish
	 * and flush dirty data if we have delalloc reservations.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
	    sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
		inode_dio_wait(VFS_I(sc->ip));
		error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
		if (error)
			goto out;
	}

	/* Got the inode; allocate a transaction and take the ILOCK. */
	error = xfs_scrub_trans_alloc(sc->sm, mp, &sc->tp);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode */
	return error;
}
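
/*
 * A note on ordering: the setup above follows the usual XFS locking
 * sequence -- take the IOLOCK and MMAPLOCK, flush pending I/O, allocate
 * the scrub transaction, and only then take the ILOCK.  Keeping that
 * order here avoids inverting the locking rules used by the regular
 * read/write and writeback paths.
 */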

/*
 * Inode fork block mapping (BMBT) scrubber.  More complex than the
 * others because we have to scrub all the extents regardless of
 * whether or not the fork is in btree format.
 */

struct xfs_scrub_bmap_info {
	struct xfs_scrub_context	*sc;
	xfs_fileoff_t			lastoff;
	bool				is_rt;
	bool				is_shared;
	int				whichfork;
};
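
/*
 * Field notes: @lastoff remembers where the previous record ended so
 * that xfs_scrub_bmap_extent() can catch out-of-order extents; @is_rt
 * and @is_shared cache whether the data fork lives on the realtime
 * device or belongs to a reflink inode; @whichfork identifies the fork
 * being scrubbed.
 */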

/* Look for a corresponding rmap for this irec. */
static inline bool
xfs_scrub_bmap_get_rmap(
	struct xfs_scrub_bmap_info	*info,
	struct xfs_bmbt_irec		*irec,
	xfs_agblock_t			agbno,
	uint64_t			owner,
	struct xfs_rmap_irec		*rmap)
{
	xfs_fileoff_t			offset;
	unsigned int			rflags = 0;
	int				has_rmap;
	int				error;

	if (info->whichfork == XFS_ATTR_FORK)
		rflags |= XFS_RMAP_ATTR_FORK;

	/*
	 * CoW staging extents are owned (on disk) by the refcountbt, so
	 * their rmaps do not have offsets.
	 */
	if (info->whichfork == XFS_COW_FORK)
		offset = 0;
	else
		offset = irec->br_startoff;

	/*
	 * If the caller thinks this could be a shared bmbt extent (IOWs,
	 * any data fork extent of a reflink inode) then we have to use the
	 * range rmap lookup to make sure we get the correct owner/offset.
	 */
	if (info->is_shared) {
		error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
				owner, offset, rflags, rmap, &has_rmap);
		if (!xfs_scrub_should_check_xref(info->sc, &error,
				&info->sc->sa.rmap_cur))
			return false;
		goto out;
	}

	/*
	 * Otherwise, use the (faster) regular lookup.
	 */
	error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
			offset, rflags, &has_rmap);
	if (!xfs_scrub_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;
	if (!has_rmap)
		goto out;

	error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
	if (!xfs_scrub_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;

out:
	if (!has_rmap)
		xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
			irec->br_startoff);
	return has_rmap;
}
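
/*
 * Why there are two lookup flavors above: on a reflink filesystem a
 * physical block can be mapped by several files at once, so the rmapbt
 * may contain multiple records that overlap the same block but carry
 * different owners and offsets.  A plain xfs_rmap_lookup_le() keyed at
 * (agbno, owner, offset) can land on a neighboring record in that case,
 * which is why possibly-shared extents use xfs_rmap_lookup_le_range() to
 * find a record that overlaps @agbno and matches the owner/offset/flags
 * we computed.  (Simplified commentary; the rmapbt code is the
 * authoritative reference for the key ordering.)
 */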

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xfs_scrub_bmap_xref_rmap(
	struct xfs_scrub_bmap_info	*info,
	struct xfs_bmbt_irec		*irec,
	xfs_agblock_t			agbno)
{
	struct xfs_rmap_irec		rmap;
	unsigned long long		rmap_end;
	uint64_t			owner;

	if (!info->sc->sa.rmap_cur)
		return;

	if (info->whichfork == XFS_COW_FORK)
		owner = XFS_RMAP_OWN_COW;
	else
		owner = info->sc->ip->i_ino;

	/* Find the rmap record for this irec. */
	if (!xfs_scrub_bmap_get_rmap(info, irec, agbno, owner, &rmap))
		return;

	/* Check the rmap. */
	rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
	if (rmap.rm_startblock > agbno ||
	    agbno + irec->br_blockcount > rmap_end)
		xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check the logical offsets if applicable.  CoW staging extents
	 * don't track logical offsets since the mappings only exist in
	 * memory.
	 */
	if (info->whichfork != XFS_COW_FORK) {
		rmap_end = (unsigned long long)rmap.rm_offset +
				rmap.rm_blockcount;
		if (rmap.rm_offset > irec->br_startoff ||
		    irec->br_startoff + irec->br_blockcount > rmap_end)
			xfs_scrub_fblock_xref_set_corrupt(info->sc,
					info->whichfork, irec->br_startoff);
	}

	if (rmap.rm_owner != owner)
		xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for discrepancies between the unwritten flag in the irec and
	 * the rmap.  Note that the (in-memory) CoW fork distinguishes between
	 * unwritten and written extents, but we don't track that in the rmap
	 * records because the blocks are owned (on-disk) by the refcountbt,
	 * which doesn't track unwritten state.
	 */
	if (owner != XFS_RMAP_OWN_COW &&
	    irec->br_state == XFS_EXT_UNWRITTEN &&
	    !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
		xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->whichfork == XFS_ATTR_FORK &&
	    !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
		xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
		xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
}
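
/*
 * Worked example of the containment checks above: if the irec maps file
 * blocks starting at agbno 110 with br_blockcount 20, the rmap record we
 * found must cover AG blocks [110, 130).  An rmap of [100, 150) passes
 * (100 <= 110 and 130 <= 150); an rmap of [100, 125) ends too early and
 * the mapping is flagged as a cross-referencing inconsistency.  The same
 * containment test is applied to the logical (rm_offset) range for
 * non-CoW forks.
 */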

/* Cross-reference a single rtdev extent record. */
STATIC void
xfs_scrub_bmap_rt_extent_xref(
	struct xfs_scrub_bmap_info	*info,
	struct xfs_inode		*ip,
	struct xfs_btree_cur		*cur,
	struct xfs_bmbt_irec		*irec)
{
	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xfs_scrub_xref_is_used_rt_space(info->sc, irec->br_startblock,
			irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xfs_scrub_bmap_extent_xref(
	struct xfs_scrub_bmap_info	*info,
	struct xfs_inode		*ip,
	struct xfs_btree_cur		*cur,
	struct xfs_bmbt_irec		*irec)
{
	struct xfs_mount		*mp = info->sc->mp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_extlen_t			len;
	int				error;

	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
	len = irec->br_blockcount;

	error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa);
	if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork,
			irec->br_startoff, &error))
		return;

	xfs_scrub_xref_is_used_space(info->sc, agbno, len);
	xfs_scrub_xref_is_not_inode_chunk(info->sc, agbno, len);
	xfs_scrub_bmap_xref_rmap(info, irec, agbno);
	switch (info->whichfork) {
	case XFS_DATA_FORK:
		if (xfs_is_reflink_inode(info->sc->ip))
			break;
		/* fall through */
	case XFS_ATTR_FORK:
		xfs_scrub_xref_is_not_shared(info->sc, agbno,
				irec->br_blockcount);
		break;
	case XFS_COW_FORK:
		xfs_scrub_xref_is_cow_staging(info->sc, agbno,
				irec->br_blockcount);
		break;
	}

	xfs_scrub_ag_free(info->sc, &info->sc->sa);
}

/* Scrub a single extent record. */
STATIC int
xfs_scrub_bmap_extent(
	struct xfs_inode		*ip,
	struct xfs_btree_cur		*cur,
	struct xfs_scrub_bmap_info	*info,
	struct xfs_bmbt_irec		*irec)
{
	struct xfs_mount		*mp = info->sc->mp;
	struct xfs_buf			*bp = NULL;
	xfs_filblks_t			end;
	int				error = 0;

	if (cur)
		xfs_btree_get_block(cur, 0, &bp);

	/*
	 * Check for out-of-order extents.  This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* There should never be a "hole" extent in either extent list. */
	if (irec->br_startblock == HOLESTARTBLOCK)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for delalloc extents.  We never iterate the ones in the
	 * in-core extent scan, and we should never see these in the bmbt.
	 */
	if (isnullstartblock(irec->br_startblock))
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
	if (irec->br_blockcount > MAXEXTLEN)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	end = irec->br_startblock + irec->br_blockcount - 1;
	if (info->is_rt &&
	    (!xfs_verify_rtbno(mp, irec->br_startblock) ||
	     !xfs_verify_rtbno(mp, end)))
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (!info->is_rt &&
	    (!xfs_verify_fsbno(mp, irec->br_startblock) ||
	     !xfs_verify_fsbno(mp, end) ||
	     XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
				XFS_FSB_TO_AGNO(mp, end)))
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* We don't allow unwritten extents on attr forks. */
	if (irec->br_state == XFS_EXT_UNWRITTEN &&
	    info->whichfork == XFS_ATTR_FORK)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->is_rt)
		xfs_scrub_bmap_rt_extent_xref(info, ip, cur, irec);
	else
		xfs_scrub_bmap_extent_xref(info, ip, cur, irec);

	info->lastoff = irec->br_startoff + irec->br_blockcount;
	return error;
}
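
/*
 * Example of the per-AG range check above: with (say) 65536-block AGs, a
 * mapping that starts 8 blocks before the end of AG 3 and runs for 16
 * blocks would end inside AG 4.  XFS_FSB_TO_AGNO() then differs for the
 * first and last block of the mapping, so the record is flagged -- a
 * single bmbt record must never cross an AG boundary.
 */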

/* Scrub a bmbt record. */
STATIC int
xfs_scrub_bmapbt_rec(
	struct xfs_scrub_btree		*bs,
	union xfs_btree_rec		*rec)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_scrub_bmap_info	*info = bs->private;
	struct xfs_inode		*ip = bs->cur->bc_private.b.ip;
	struct xfs_buf			*bp = NULL;
	struct xfs_btree_block		*block;
	uint64_t			owner;
	int				i;

	/*
	 * Check the owners of the btree blocks up to the level below
	 * the root since the verifiers don't do that.
	 */
	if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
	    bs->cur->bc_ptrs[0] == 1) {
		for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
			block = xfs_btree_get_block(bs->cur, i, &bp);
			owner = be64_to_cpu(block->bb_u.l.bb_owner);
			if (owner != ip->i_ino)
				xfs_scrub_fblock_set_corrupt(bs->sc,
						info->whichfork, 0);
		}
	}

	/* Set up the in-core record and scrub it. */
	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec);
}
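
/*
 * Note on the owner check above: only v5 (CRC-enabled) filesystems record
 * an owner in the headers of long-format btree blocks, hence the
 * xfs_sb_version_hascrc() gate.  The bc_ptrs[0] == 1 test makes the check
 * run once per leaf block (when the cursor sits on its first record)
 * rather than once per record, and the root level is skipped because the
 * bmbt root lives in the inode fork and has no on-disk block header.
 */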

/* Scan the btree records. */
STATIC int
xfs_scrub_bmap_btree(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	struct xfs_scrub_bmap_info	*info)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_inode		*ip = sc->ip;
	struct xfs_btree_cur		*cur;
	int				error;

	cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xfs_scrub_btree(sc, cur, xfs_scrub_bmapbt_rec, &oinfo, info);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR :
					  XFS_BTREE_NOERROR);
	return error;
}

struct xfs_scrub_bmap_check_rmap_info {
	struct xfs_scrub_context	*sc;
	int				whichfork;
	struct xfs_iext_cursor		icur;
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xfs_scrub_bmap_check_rmap(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_scrub_bmap_check_rmap_info	*sbcri = priv;
	struct xfs_ifork		*ifp;
	struct xfs_scrub_context	*sc = sbcri->sc;
	bool				have_map;

	/* Is this even the right fork? */
	if (rec->rm_owner != sc->ip->i_ino)
		return 0;
	if ((sbcri->whichfork == XFS_ATTR_FORK) ^
	    !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;
	if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return 0;

	/* Now look up the bmbt record. */
	ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
	if (!ifp) {
		xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
		goto out;
	}
	have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
			&sbcri->icur, &irec);
	if (!have_map)
		xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
	/*
	 * bmap extent record lengths are constrained to 2^21 - 1 blocks
	 * (MAXEXTLEN) because of space constraints in the on-disk metadata
	 * structure.
	 * However, rmap extent record lengths are constrained only by AG
	 * length, so we have to loop through the bmbt to make sure that the
	 * entire rmap is covered by bmbt records.
	 */
	while (have_map) {
		if (irec.br_startoff != rec->rm_offset)
			xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
				cur->bc_private.a.agno, rec->rm_startblock))
			xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_blockcount > rec->rm_blockcount)
			xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
		rec->rm_startblock += irec.br_blockcount;
		rec->rm_offset += irec.br_blockcount;
		rec->rm_blockcount -= irec.br_blockcount;
		if (rec->rm_blockcount == 0)
			break;
		have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
		if (!have_map)
			xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
	}

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return XFS_BTREE_QUERY_RANGE_ABORT;
	return 0;
}
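
/*
 * Worked example of the loop above: MAXEXTLEN is 2^21 - 1 (2,097,151)
 * blocks, so a single rmap record covering 4,200,000 blocks must be
 * backed by at least three bmbt records.  Each pass checks that the
 * current bmbt record starts exactly where the remaining rmap range
 * starts, then advances rm_startblock/rm_offset and shrinks
 * rm_blockcount by br_blockcount until the whole rmap is accounted for.
 */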

/* Make sure each rmap in this AG has a corresponding bmbt entry. */
STATIC int
xfs_scrub_bmap_check_ag_rmaps(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_agnumber_t			agno)
{
	struct xfs_scrub_bmap_check_rmap_info	sbcri;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agf;
	int				error;

	error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
	if (error)
		return error;

	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
	if (!cur) {
		error = -ENOMEM;
		goto out_agf;
	}

	sbcri.sc = sc;
	sbcri.whichfork = whichfork;
	error = xfs_rmap_query_all(cur, xfs_scrub_bmap_check_rmap, &sbcri);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT)
		error = 0;

	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
out_agf:
	xfs_trans_brelse(sc->tp, agf);
	return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xfs_scrub_bmap_check_rmaps(
	struct xfs_scrub_context	*sc,
	int				whichfork)
{
	loff_t				size;
	xfs_agnumber_t			agno;
	int				error;

	if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
	    whichfork == XFS_COW_FORK ||
	    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return 0;

	/* Don't support realtime rmap checks yet. */
	if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
		return 0;

	/*
	 * Only do this for complex maps that are in btree format, or for
	 * situations where we would seem to have a size but zero extents.
	 * The inode repair code can zap broken iforks, which means we have
	 * to flag this bmap as corrupt if there are rmaps that need to be
	 * reattached.
	 */
	switch (whichfork) {
	case XFS_DATA_FORK:
		size = i_size_read(VFS_I(sc->ip));
		break;
	case XFS_ATTR_FORK:
		size = XFS_IFORK_Q(sc->ip);
		break;
	default:
		size = 0;
		break;
	}
	if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    (size == 0 || XFS_IFORK_NEXTENTS(sc->ip, whichfork) > 0))
		return 0;

	for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
		error = xfs_scrub_bmap_check_ag_rmaps(sc, whichfork, agno);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
	}

	return 0;
}
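
/*
 * Example of when the rmap cross-check above actually runs: a data fork
 * in extents format with a nonzero i_size but zero recorded extents is
 * what a zapped ifork looks like, so we still walk every AG's rmapbt;
 * any leftover rmaps naming this inode and fork mean mappings were lost,
 * and the fork is reported as corrupt.  A healthy extents-format fork
 * with a nonzero extent count skips the walk entirely.
 */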

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xfs_scrub_bmap(
	struct xfs_scrub_context	*sc,
	int				whichfork)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_scrub_bmap_info	info = { NULL };
	struct xfs_mount		*mp = sc->mp;
	struct xfs_inode		*ip = sc->ip;
	struct xfs_ifork		*ifp;
	xfs_fileoff_t			endoff;
	struct xfs_iext_cursor		icur;
	int				error = 0;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
	info.whichfork = whichfork;
	info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
	info.sc = sc;

	switch (whichfork) {
	case XFS_COW_FORK:
		/* Non-existent CoW forks are ignorable. */
		if (!ifp)
			goto out;
		/* No CoW forks on non-reflink inodes/filesystems. */
		if (!xfs_is_reflink_inode(ip)) {
			xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
			goto out;
		}
		break;
	case XFS_ATTR_FORK:
		if (!ifp)
			goto out_check_rmap;
		if (!xfs_sb_version_hasattr(&mp->m_sb) &&
		    !xfs_sb_version_hasattr2(&mp->m_sb))
			xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
		break;
	default:
		ASSERT(whichfork == XFS_DATA_FORK);
		break;
	}

	/* Check the fork values */
	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_UUID:
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
		/* No mappings to check. */
		goto out;
	case XFS_DINODE_FMT_EXTENTS:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_COW_FORK) {
			xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}

		error = xfs_scrub_bmap_btree(sc, whichfork, &info);
		if (error)
			goto out;
		break;
	default:
		xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Now try to scrub the in-memory extent list. */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(sc->tp, ip, whichfork);
		if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
			goto out;
	}

	/* Find the offset of the last extent in the mapping. */
	error = xfs_bmap_last_offset(ip, &endoff, whichfork);
	if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Scrub extent records. */
	info.lastoff = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xfs_scrub_should_terminate(sc, &error))
			break;
		if (isnullstartblock(irec.br_startblock))
			continue;
		if (irec.br_startoff >= endoff) {
			xfs_scrub_fblock_set_corrupt(sc, whichfork,
					irec.br_startoff);
			goto out;
		}
		error = xfs_scrub_bmap_extent(ip, NULL, &info, &irec);
		if (error)
			goto out;
	}

out_check_rmap:
	error = xfs_scrub_bmap_check_rmaps(sc, whichfork);
	if (!xfs_scrub_fblock_xref_process_error(sc, whichfork, 0, &error))
		goto out;
out:
	return error;
}

/* Scrub an inode's data fork. */
int
xfs_scrub_bmap_data(
	struct xfs_scrub_context	*sc)
{
	return xfs_scrub_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xfs_scrub_bmap_attr(
	struct xfs_scrub_context	*sc)
{
	return xfs_scrub_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xfs_scrub_bmap_cow(
	struct xfs_scrub_context	*sc)
{
	if (!xfs_is_reflink_inode(sc->ip))
		return -ENOENT;

	return xfs_scrub_bmap(sc, XFS_COW_FORK);
}
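
/*
 * Usage sketch (not kernel code): these scrubbers are reached through the
 * XFS_IOC_SCRUB_METADATA ioctl.  A minimal userspace caller might look
 * like the following, assuming the usual convention that sm_ino == 0
 * means "scrub the file the ioctl is issued against"; headers and error
 * handling are omitted:
 *
 *	struct xfs_scrub_metadata sm = { .sm_type = XFS_SCRUB_TYPE_BMBTD };
 *	int fd = open("/mnt/testfile", O_RDONLY);
 *
 *	if (ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm) == 0 &&
 *	    (sm.sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
 *		fprintf(stderr, "data fork bmap is corrupt\n");
 *
 * XFS_SCRUB_TYPE_BMBTA and XFS_SCRUB_TYPE_BMBTC select the attr fork and
 * CoW fork scrubbers above in the same way.
 */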