// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"

/* Set us up with an inode's bmap. */
int
xchk_setup_inode_bmap(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	int			error;

	error = xchk_get_inode(sc, ip);
	if (error)
		goto out;

	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);

	/*
	 * We don't want any ephemeral data fork updates sitting around
	 * while we inspect block mappings, so wait for directio to finish
	 * and flush dirty data if we have delalloc reservations.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
	    sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
		inode_dio_wait(VFS_I(sc->ip));
		error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
		if (error)
			goto out;
	}

	/* Allocate a transaction, take the ILOCK, and we're ready to go. */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode */
	return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

struct xchk_bmap_info {
	struct xfs_scrub	*sc;
	xfs_fileoff_t		lastoff;
	bool			is_rt;
	bool			is_shared;
	int			whichfork;
};

/* Look for a corresponding rmap for this irec. */
static inline bool
xchk_bmap_get_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno,
	uint64_t		owner,
	struct xfs_rmap_irec	*rmap)
{
	xfs_fileoff_t		offset;
	unsigned int		rflags = 0;
	int			has_rmap;
	int			error;

	if (info->whichfork == XFS_ATTR_FORK)
		rflags |= XFS_RMAP_ATTR_FORK;

	/*
	 * CoW staging extents are owned (on disk) by the refcountbt, so
	 * their rmaps do not have offsets.
	 */
	if (info->whichfork == XFS_COW_FORK)
		offset = 0;
	else
		offset = irec->br_startoff;

	/*
	 * If the caller thinks this could be a shared bmbt extent (IOWs,
	 * any data fork extent of a reflink inode) then we have to use the
	 * range rmap lookup to make sure we get the correct owner/offset.
	 */
	if (info->is_shared) {
		error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
				owner, offset, rflags, rmap, &has_rmap);
		if (!xchk_should_check_xref(info->sc, &error,
				&info->sc->sa.rmap_cur))
			return false;
		goto out;
	}

	/*
	 * Otherwise, use the (faster) regular lookup.
	 */
	error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
			offset, rflags, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;
	if (!has_rmap)
		goto out;

	error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;

out:
	if (!has_rmap)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
			irec->br_startoff);
	return has_rmap;
}

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xchk_bmap_xref_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno)
{
	struct xfs_rmap_irec	rmap;
	unsigned long long	rmap_end;
	uint64_t		owner;

	if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
		return;

	if (info->whichfork == XFS_COW_FORK)
		owner = XFS_RMAP_OWN_COW;
	else
		owner = info->sc->ip->i_ino;

	/* Find the rmap record for this irec. */
	if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
		return;

	/* Check the rmap. */
	rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
	if (rmap.rm_startblock > agbno ||
	    agbno + irec->br_blockcount > rmap_end)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check the logical offsets if applicable.  CoW staging extents
	 * don't track logical offsets since the mappings only exist in
	 * memory.
	 */
	if (info->whichfork != XFS_COW_FORK) {
		rmap_end = (unsigned long long)rmap.rm_offset +
				rmap.rm_blockcount;
		if (rmap.rm_offset > irec->br_startoff ||
		    irec->br_startoff + irec->br_blockcount > rmap_end)
			xchk_fblock_xref_set_corrupt(info->sc,
					info->whichfork, irec->br_startoff);
	}

	if (rmap.rm_owner != owner)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for discrepancies between the unwritten flag in the irec and
	 * the rmap.  Note that the (in-memory) CoW fork distinguishes between
	 * unwritten and written extents, but we don't track that in the rmap
	 * records because the blocks are owned (on-disk) by the refcountbt,
	 * which doesn't track unwritten state.
	 */
	if (owner != XFS_RMAP_OWN_COW &&
	    irec->br_state == XFS_EXT_UNWRITTEN &&
	    !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->whichfork == XFS_ATTR_FORK &&
	    !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
}

/* Cross-reference a single rtdev extent record. */
STATIC void
xchk_bmap_rt_extent_xref(
	struct xchk_bmap_info	*info,
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
			irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xchk_bmap_extent_xref(
	struct xchk_bmap_info	*info,
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		len;
	int			error;

	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
	len = irec->br_blockcount;

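	/*
	 * Grab the AG headers and set up the per-AG btree cursors that
	 * the cross-reference helpers below will use.
	 */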
	error = xchk_ag_init(info->sc, agno, &info->sc->sa);
	if (!xchk_fblock_process_error(info->sc, info->whichfork,
			irec->br_startoff, &error))
		return;

	xchk_xref_is_used_space(info->sc, agbno, len);
	xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
	xchk_bmap_xref_rmap(info, irec, agbno);
	switch (info->whichfork) {
	case XFS_DATA_FORK:
		if (xfs_is_reflink_inode(info->sc->ip))
			break;
		/* fall through */
	case XFS_ATTR_FORK:
		xchk_xref_is_not_shared(info->sc, agbno,
				irec->br_blockcount);
		break;
	case XFS_COW_FORK:
		xchk_xref_is_cow_staging(info->sc, agbno,
				irec->br_blockcount);
		break;
	}

	xchk_ag_free(info->sc, &info->sc->sa);
}

/*
 * Directories and attr forks should never have blocks that can't be addressed
 * by an xfs_dablk_t.
 */
STATIC void
xchk_bmap_dirattr_extent(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		off;

	if (!S_ISDIR(VFS_I(ip)->i_mode) && info->whichfork != XFS_ATTR_FORK)
		return;

	if (!xfs_verify_dablk(mp, irec->br_startoff))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	off = irec->br_startoff + irec->br_blockcount - 1;
	if (!xfs_verify_dablk(mp, off))
		xchk_fblock_set_corrupt(info->sc, info->whichfork, off);
}

/* Scrub a single extent record. */
STATIC int
xchk_bmap_extent(
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	struct xfs_buf		*bp = NULL;
	xfs_filblks_t		end;
	int			error = 0;

	if (cur)
		xfs_btree_get_block(cur, 0, &bp);

	/*
	 * Check for out-of-order extents.  This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	xchk_bmap_dirattr_extent(ip, info, irec);

	/* There should never be a "hole" extent in either extent list. */
	if (irec->br_startblock == HOLESTARTBLOCK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for delalloc extents.  We never iterate the ones in the
	 * in-core extent scan, and we should never see these in the bmbt.
	 */
	if (isnullstartblock(irec->br_startblock))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
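	/*
	 * Note: the on-disk bmbt record stores the extent length in a
	 * 21-bit field, so a single mapping can never exceed MAXEXTLEN
	 * blocks.  (See also the comment in xchk_bmap_check_rmap.)
	 */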
	if (irec->br_blockcount > MAXEXTLEN)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
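	/* Catch zero-length mappings and startblock + blockcount overflow. */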
	if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	end = irec->br_startblock + irec->br_blockcount - 1;
	if (info->is_rt &&
	    (!xfs_verify_rtbno(mp, irec->br_startblock) ||
	     !xfs_verify_rtbno(mp, end)))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
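	/*
	 * Mappings on the data device must point at valid fsblocks and
	 * must not cross an AG boundary.
	 */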
	if (!info->is_rt &&
	    (!xfs_verify_fsbno(mp, irec->br_startblock) ||
	     !xfs_verify_fsbno(mp, end) ||
	     XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
				XFS_FSB_TO_AGNO(mp, end)))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* We don't allow unwritten extents on attr forks. */
	if (irec->br_state == XFS_EXT_UNWRITTEN &&
	    info->whichfork == XFS_ATTR_FORK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->is_rt)
		xchk_bmap_rt_extent_xref(info, ip, cur, irec);
	else
		xchk_bmap_extent_xref(info, ip, cur, irec);

	info->lastoff = irec->br_startoff + irec->br_blockcount;
	return error;
}

/* Scrub a bmbt record. */
STATIC int
xchk_bmapbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_bmbt_irec	irec;
	struct xchk_bmap_info	*info = bs->private;
	struct xfs_inode	*ip = bs->cur->bc_private.b.ip;
	struct xfs_buf		*bp = NULL;
	struct xfs_btree_block	*block;
	uint64_t		owner;
	int			i;

	/*
	 * Check the owners of the btree blocks up to the level below
	 * the root since the verifiers don't do that.
	 */
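	/* We only need to do this once per leaf block. */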
	if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
	    bs->cur->bc_ptrs[0] == 1) {
		for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
			block = xfs_btree_get_block(bs->cur, i, &bp);
			owner = be64_to_cpu(block->bb_u.l.bb_owner);
			if (owner != ip->i_ino)
				xchk_fblock_set_corrupt(bs->sc,
						info->whichfork, 0);
		}
	}

	/* Set up the in-core record and scrub it. */
	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	return xchk_bmap_extent(ip, bs->cur, info, &irec);
}

/* Scan the btree records. */
STATIC int
xchk_bmap_btree(
	struct xfs_scrub	*sc,
	int			whichfork,
	struct xchk_bmap_info	*info)
{
	struct xfs_owner_info	oinfo;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_btree_cur	*cur;
	int			error;

	cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
	xfs_btree_del_cursor(cur, error);
	return error;
}

struct xchk_bmap_check_rmap_info {
	struct xfs_scrub	*sc;
	int			whichfork;
	struct xfs_iext_cursor	icur;
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xchk_bmap_check_rmap(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_bmbt_irec		irec;
	struct xchk_bmap_check_rmap_info	*sbcri = priv;
	struct xfs_ifork		*ifp;
	struct xfs_scrub		*sc = sbcri->sc;
	bool				have_map;

	/* Is this even the right fork? */
	if (rec->rm_owner != sc->ip->i_ino)
		return 0;
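	/* The rmap's attr fork flag must match the fork being checked. */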
	if ((sbcri->whichfork == XFS_ATTR_FORK) ^
	    !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;
	if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return 0;

	/* Now look up the bmbt record. */
	ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
	if (!ifp) {
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
		goto out;
	}
	have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
			&sbcri->icur, &irec);
	if (!have_map)
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
	/*
	 * bmap extent records are constrained to 2^21 blocks in length
	 * because of space constraints in the on-disk metadata structure.
	 * However, rmap extent record lengths are constrained only by AG
	 * length, so we have to loop through the bmbt to make sure that the
	 * entire rmap is covered by bmbt records.
	 */
	while (have_map) {
		if (irec.br_startoff != rec->rm_offset)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
				cur->bc_private.a.agno, rec->rm_startblock))
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_blockcount > rec->rm_blockcount)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
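		/* Trim this bmbt mapping off the front of the rmap record. */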
		rec->rm_startblock += irec.br_blockcount;
		rec->rm_offset += irec.br_blockcount;
		rec->rm_blockcount -= irec.br_blockcount;
		if (rec->rm_blockcount == 0)
			break;
		have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
		if (!have_map)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
	}

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return XFS_BTREE_QUERY_RANGE_ABORT;
	return 0;
}

/* Make sure each rmap in this AG has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_ag_rmaps(
	struct xfs_scrub		*sc,
	int				whichfork,
	xfs_agnumber_t			agno)
{
	struct xchk_bmap_check_rmap_info	sbcri;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agf;
	int				error;

	error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
	if (error)
		return error;

	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
	if (!cur) {
		error = -ENOMEM;
		goto out_agf;
	}

	sbcri.sc = sc;
	sbcri.whichfork = whichfork;
	error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT)
		error = 0;

	xfs_btree_del_cursor(cur, error);
out_agf:
	xfs_trans_brelse(sc->tp, agf);
	return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_rmaps(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	loff_t			size;
	xfs_agnumber_t		agno;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
	    whichfork == XFS_COW_FORK ||
	    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return 0;

	/* Don't support realtime rmap checks yet. */
	if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
		return 0;

	/*
	 * Only do this for complex maps that are in btree format, or for
	 * situations where we would seem to have a size but zero extents.
	 * The inode repair code can zap broken iforks, which means we have
	 * to flag this bmap as corrupt if there are rmaps that need to be
	 * reattached.
	 */
	switch (whichfork) {
	case XFS_DATA_FORK:
		size = i_size_read(VFS_I(sc->ip));
		break;
	case XFS_ATTR_FORK:
		size = XFS_IFORK_Q(sc->ip);
		break;
	default:
		size = 0;
		break;
	}
	if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    (size == 0 || XFS_IFORK_NEXTENTS(sc->ip, whichfork) > 0))
		return 0;

	for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
		error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
	}

	return 0;
}

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xchk_bmap(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	struct xfs_bmbt_irec	irec;
	struct xchk_bmap_info	info = { NULL };
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		endoff;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
	info.whichfork = whichfork;
	info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
	info.sc = sc;

	switch (whichfork) {
	case XFS_COW_FORK:
		/* Non-existent CoW forks are ignorable. */
		if (!ifp)
			goto out;
		/* No CoW forks on non-reflink inodes/filesystems. */
		if (!xfs_is_reflink_inode(ip)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			goto out;
		}
		break;
	case XFS_ATTR_FORK:
		if (!ifp)
			goto out_check_rmap;
		if (!xfs_sb_version_hasattr(&mp->m_sb) &&
		    !xfs_sb_version_hasattr2(&mp->m_sb))
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		break;
	default:
		ASSERT(whichfork == XFS_DATA_FORK);
		break;
	}

	/* Check the fork values */
	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_UUID:
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
		/* No mappings to check. */
		goto out;
	case XFS_DINODE_FMT_EXTENTS:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_COW_FORK) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}

		error = xchk_bmap_btree(sc, whichfork, &info);
		if (error)
			goto out;
		break;
	default:
		xchk_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Now try to scrub the in-memory extent list. */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(sc->tp, ip, whichfork);
		if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
			goto out;
	}

	/* Find the offset of the last extent in the mapping. */
	error = xfs_bmap_last_offset(ip, &endoff, whichfork);
	if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Scrub extent records. */
	info.lastoff = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error) ||
		    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			break;
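		/* Delalloc reservations exist only in memory; skip them. */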
		if (isnullstartblock(irec.br_startblock))
			continue;
		if (irec.br_startoff >= endoff) {
			xchk_fblock_set_corrupt(sc, whichfork,
					irec.br_startoff);
			goto out;
		}
		error = xchk_bmap_extent(ip, NULL, &info, &irec);
		if (error)
			goto out;
	}

out_check_rmap:
	error = xchk_bmap_check_rmaps(sc, whichfork);
	if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
		goto out;
out:
	return error;
}

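/*
 * The entry points below are wired up through the scrub dispatch table in
 * fs/xfs/scrub/scrub.c; roughly (a sketch, not copied from that file):
 *
 *	[XFS_SCRUB_TYPE_BMBTD] = {
 *		.setup	= xchk_setup_inode_bmap,
 *		.scrub	= xchk_bmap_data,
 *	},
 *
 * and likewise XFS_SCRUB_TYPE_BMBTA maps to xchk_bmap_attr and
 * XFS_SCRUB_TYPE_BMBTC maps to xchk_bmap_cow.
 */
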
/* Scrub an inode's data fork. */
int
xchk_bmap_data(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xchk_bmap_attr(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xchk_bmap_cow(
	struct xfs_scrub	*sc)
{
	if (!xfs_is_reflink_inode(sc->ip))
		return -ENOENT;

	return xchk_bmap(sc, XFS_COW_FORK);
}