xref: /openbmc/linux/fs/xfs/scrub/bmap.c (revision 249592bf)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"

/* Set us up with an inode's bmap. */
int
xchk_setup_inode_bmap(
	struct xfs_scrub	*sc)
{
	int			error;

	error = xchk_get_inode(sc);
	if (error)
		goto out;

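	/*
	 * Take the IO and mmap locks now; the ILOCK is deferred until after
	 * the scrub transaction has been allocated below.
	 */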
	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);

	/*
	 * We don't want any ephemeral data fork updates sitting around
	 * while we inspect block mappings, so wait for directio to finish
	 * and flush dirty data if we have delalloc reservations.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
	    sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
		struct address_space	*mapping = VFS_I(sc->ip)->i_mapping;

		inode_dio_wait(VFS_I(sc->ip));

		/*
		 * Try to flush all incore state to disk before we examine the
		 * space mappings for the data fork.  Leave accumulated errors
		 * in the mapping for the writer threads to consume.
		 *
		 * On ENOSPC or EIO writeback errors, we continue into the
		 * extent mapping checks because write failures do not
		 * necessarily imply anything about the correctness of the file
		 * metadata.  The metadata and the file data could be on
		 * completely separate devices; a media failure might only
		 * affect a subset of the disk, etc.  We can handle delalloc
		 * extents in the scrubber, so leaving them in memory is fine.
		 */
		error = filemap_fdatawrite(mapping);
		if (!error)
			error = filemap_fdatawait_keep_errors(mapping);
		if (error && (error != -ENOSPC && error != -EIO))
			goto out;
	}

	/* Got the inode, lock it and we're ready to go. */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode */
	return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

struct xchk_bmap_info {
	struct xfs_scrub	*sc;
	xfs_fileoff_t		lastoff;
	bool			is_rt;
	bool			is_shared;
	bool			was_loaded;
	int			whichfork;
};

/* Look for a corresponding rmap for this irec. */
static inline bool
xchk_bmap_get_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno,
	uint64_t		owner,
	struct xfs_rmap_irec	*rmap)
{
	xfs_fileoff_t		offset;
	unsigned int		rflags = 0;
	int			has_rmap;
	int			error;

	if (info->whichfork == XFS_ATTR_FORK)
		rflags |= XFS_RMAP_ATTR_FORK;
	if (irec->br_state == XFS_EXT_UNWRITTEN)
		rflags |= XFS_RMAP_UNWRITTEN;

	/*
	 * CoW staging extents are owned (on disk) by the refcountbt, so
	 * their rmaps do not have offsets.
	 */
	if (info->whichfork == XFS_COW_FORK)
		offset = 0;
	else
		offset = irec->br_startoff;

	/*
	 * If the caller thinks this could be a shared bmbt extent (IOWs,
	 * any data fork extent of a reflink inode) then we have to use the
	 * range rmap lookup to make sure we get the correct owner/offset.
	 */
	if (info->is_shared) {
		error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
				owner, offset, rflags, rmap, &has_rmap);
		if (!xchk_should_check_xref(info->sc, &error,
				&info->sc->sa.rmap_cur))
			return false;
		goto out;
	}

	/*
	 * Otherwise, use the (faster) regular lookup.
	 */
	error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
			offset, rflags, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;
	if (!has_rmap)
		goto out;

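	/* Fetch the record that the LE lookup positioned the cursor at. */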
	error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;

out:
	if (!has_rmap)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
			irec->br_startoff);
	return has_rmap;
}

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xchk_bmap_xref_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno)
{
	struct xfs_rmap_irec	rmap;
	unsigned long long	rmap_end;
	uint64_t		owner;

	if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
		return;

	if (info->whichfork == XFS_COW_FORK)
		owner = XFS_RMAP_OWN_COW;
	else
		owner = info->sc->ip->i_ino;

	/* Find the rmap record for this irec. */
	if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
		return;

	/* Check that the rmap covers all the blocks of this extent. */
	rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
	if (rmap.rm_startblock > agbno ||
	    agbno + irec->br_blockcount > rmap_end)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check the logical offsets if applicable.  CoW staging extents
	 * don't track logical offsets since the mappings only exist in
	 * memory.
	 */
	if (info->whichfork != XFS_COW_FORK) {
		rmap_end = (unsigned long long)rmap.rm_offset +
				rmap.rm_blockcount;
		if (rmap.rm_offset > irec->br_startoff ||
		    irec->br_startoff + irec->br_blockcount > rmap_end)
			xchk_fblock_xref_set_corrupt(info->sc,
					info->whichfork, irec->br_startoff);
	}

	if (rmap.rm_owner != owner)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for discrepancies between the unwritten flag in the irec and
	 * the rmap.  Note that the (in-memory) CoW fork distinguishes between
	 * unwritten and written extents, but we don't track that in the rmap
	 * records because the blocks are owned (on-disk) by the refcountbt,
	 * which doesn't track unwritten state.
	 */
	if (owner != XFS_RMAP_OWN_COW &&
	    !!(irec->br_state == XFS_EXT_UNWRITTEN) !=
	    !!(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (!!(info->whichfork == XFS_ATTR_FORK) !=
	    !!(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
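	/* File mappings must never be backed by a bmbt-block rmap. */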
	if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
}

/* Cross-reference a single rtdev extent record. */
STATIC void
xchk_bmap_rt_iextent_xref(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
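	/*
	 * The realtime volume has no rmap or refcount btrees, so the only
	 * cross-reference we can make here is the rt space allocation check.
	 */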
	xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
			irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xchk_bmap_iextent_xref(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		len;
	int			error;

	agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
	len = irec->br_blockcount;

	error = xchk_ag_init(info->sc, agno, &info->sc->sa);
	if (!xchk_fblock_process_error(info->sc, info->whichfork,
			irec->br_startoff, &error))
		return;

	xchk_xref_is_used_space(info->sc, agbno, len);
	xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
	xchk_bmap_xref_rmap(info, irec, agbno);
	switch (info->whichfork) {
	case XFS_DATA_FORK:
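		/*
		 * Data fork blocks of a reflink file can legitimately be
		 * shared, so only flag shared blocks for non-reflink files
		 * and for attr forks.
		 */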
		if (xfs_is_reflink_inode(info->sc->ip))
			break;
		/* fall through */
	case XFS_ATTR_FORK:
		xchk_xref_is_not_shared(info->sc, agbno,
				irec->br_blockcount);
		break;
	case XFS_COW_FORK:
		xchk_xref_is_cow_staging(info->sc, agbno,
				irec->br_blockcount);
		break;
	}

	xchk_ag_free(info->sc, &info->sc->sa);
}

/*
 * Directories and attr forks should never have blocks that can't be addressed
 * by an xfs_dablk_t.
 */
STATIC void
xchk_bmap_dirattr_extent(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		off;

	if (!S_ISDIR(VFS_I(ip)->i_mode) && info->whichfork != XFS_ATTR_FORK)
		return;

	if (!xfs_verify_dablk(mp, irec->br_startoff))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

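	/* The last block of the mapping must be addressable too. */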
	off = irec->br_startoff + irec->br_blockcount - 1;
	if (!xfs_verify_dablk(mp, off))
		xchk_fblock_set_corrupt(info->sc, info->whichfork, off);
}

/* Scrub a single extent record. */
STATIC int
xchk_bmap_iextent(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	int			error = 0;

	/*
	 * Check for out-of-order extents.  This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	xchk_bmap_dirattr_extent(ip, info, irec);

	/* There should never be a "hole" extent in either extent list. */
	if (irec->br_startblock == HOLESTARTBLOCK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for delalloc extents.  We never iterate the ones in the
	 * in-core extent scan, and we should never see these in the bmbt.
	 */
	if (isnullstartblock(irec->br_startblock))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
	if (irec->br_blockcount > MAXEXTLEN)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (info->is_rt &&
	    !xfs_verify_rtext(mp, irec->br_startblock, irec->br_blockcount))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (!info->is_rt &&
	    !xfs_verify_fsbext(mp, irec->br_startblock, irec->br_blockcount))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* We don't allow unwritten extents on attr forks. */
	if (irec->br_state == XFS_EXT_UNWRITTEN &&
	    info->whichfork == XFS_ATTR_FORK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

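	/* Skip the cross-referencing if corruption has already been found. */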
	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	if (info->is_rt)
		xchk_bmap_rt_iextent_xref(ip, info, irec);
	else
		xchk_bmap_iextent_xref(ip, info, irec);

	info->lastoff = irec->br_startoff + irec->br_blockcount;
	return error;
}

/* Scrub a bmbt record. */
STATIC int
xchk_bmapbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	iext_irec;
	struct xfs_iext_cursor	icur;
	struct xchk_bmap_info	*info = bs->private;
	struct xfs_inode	*ip = bs->cur->bc_ino.ip;
	struct xfs_buf		*bp = NULL;
	struct xfs_btree_block	*block;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, info->whichfork);
	uint64_t		owner;
	int			i;

	/*
	 * Check the owners of the btree blocks up to the level below
	 * the root since the verifiers don't do that.  Only do this when
	 * the cursor points at the first record of the leaf block so that
	 * we don't repeat the owner checks for every single record.
	 */
	if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
	    bs->cur->bc_ptrs[0] == 1) {
		for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
			block = xfs_btree_get_block(bs->cur, i, &bp);
			owner = be64_to_cpu(block->bb_u.l.bb_owner);
			if (owner != ip->i_ino)
				xchk_fblock_set_corrupt(bs->sc,
						info->whichfork, 0);
		}
	}

	/*
	 * Check that the incore extent tree contains an extent that matches
	 * this one exactly.  We validate those cached bmaps later, so we don't
	 * need to check them here.  If the incore extent tree was just loaded
	 * from disk by the scrubber, we assume that its contents match what's
	 * on disk (we still hold the ILOCK) and skip the equivalence check.
	 */
	if (!info->was_loaded)
		return 0;

	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	if (!xfs_iext_lookup_extent(ip, ifp, irec.br_startoff, &icur,
				&iext_irec) ||
	    irec.br_startoff != iext_irec.br_startoff ||
	    irec.br_startblock != iext_irec.br_startblock ||
	    irec.br_blockcount != iext_irec.br_blockcount ||
	    irec.br_state != iext_irec.br_state)
		xchk_fblock_set_corrupt(bs->sc, info->whichfork,
				irec.br_startoff);
	return 0;
}

/* Scan the btree records. */
STATIC int
xchk_bmap_btree(
	struct xfs_scrub	*sc,
	int			whichfork,
	struct xchk_bmap_info	*info)
{
	struct xfs_owner_info	oinfo;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(sc->ip, whichfork);
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_btree_cur	*cur;
	int			error;

	/*
	 * Load the incore bmap cache if it's not already loaded.  Note
	 * whether it was already loaded; if so, the record scrubber will
	 * cross-check each bmbt record against the cached extents.
	 */
	info->was_loaded = !xfs_need_iread_extents(ifp);

	error = xfs_iread_extents(sc->tp, ip, whichfork);
	if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Check the btree structure. */
	cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
	xfs_btree_del_cursor(cur, error);
out:
	return error;
}

struct xchk_bmap_check_rmap_info {
	struct xfs_scrub	*sc;
	int			whichfork;
	struct xfs_iext_cursor	icur;
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xchk_bmap_check_rmap(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_bmbt_irec		irec;
	struct xchk_bmap_check_rmap_info	*sbcri = priv;
	struct xfs_ifork		*ifp;
	struct xfs_scrub		*sc = sbcri->sc;
	bool				have_map;

	/* Is this even the right fork? */
	if (rec->rm_owner != sc->ip->i_ino)
		return 0;
	if ((sbcri->whichfork == XFS_ATTR_FORK) ^
	    !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;
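	/* Skip rmaps for bmbt blocks; they don't map file data. */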
	if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return 0;

	/* Now look up the bmbt record. */
	ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
	if (!ifp) {
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
		goto out;
	}
	have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
			&sbcri->icur, &irec);
	if (!have_map)
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
	/*
	 * bmap extent record lengths are constrained to 2^21 blocks in length
	 * because of space constraints in the on-disk metadata structure.
	 * However, rmap extent record lengths are constrained only by AG
	 * length, so we have to loop through the bmbt to make sure that the
	 * entire rmap is covered by bmbt records.
	 */
	while (have_map) {
		if (irec.br_startoff != rec->rm_offset)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
				cur->bc_ag.agno, rec->rm_startblock))
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_blockcount > rec->rm_blockcount)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
		rec->rm_startblock += irec.br_blockcount;
		rec->rm_offset += irec.br_blockcount;
		rec->rm_blockcount -= irec.br_blockcount;
		if (rec->rm_blockcount == 0)
			break;
		have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
		if (!have_map)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
	}

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;
	return 0;
}

/* Make sure each rmap in this AG has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_ag_rmaps(
	struct xfs_scrub		*sc,
	int				whichfork,
	xfs_agnumber_t			agno)
{
	struct xchk_bmap_check_rmap_info	sbcri;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agf;
	int				error;

	error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
	if (error)
		return error;

	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);

	sbcri.sc = sc;
	sbcri.whichfork = whichfork;
	error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
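	/*
	 * The callback returns -ECANCELED to stop the query early once
	 * corruption has been found; that's not a real error here.
	 */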
	if (error == -ECANCELED)
		error = 0;

	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(sc->tp, agf);
	return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_rmaps(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(sc->ip, whichfork);
	xfs_agnumber_t		agno;
	bool			zero_size;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
	    whichfork == XFS_COW_FORK ||
	    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return 0;

	/* Don't support realtime rmap checks yet. */
	if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
		return 0;

	ASSERT(XFS_IFORK_PTR(sc->ip, whichfork) != NULL);

	/*
	 * Only do this for complex maps that are in btree format, or for
	 * situations where we would seem to have a size but zero extents.
	 * The inode repair code can zap broken iforks, which means we have
	 * to flag this bmap as corrupt if there are rmaps that need to be
	 * reattached.
	 */

	if (whichfork == XFS_DATA_FORK)
		zero_size = i_size_read(VFS_I(sc->ip)) == 0;
	else
		zero_size = false;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE &&
	    (zero_size || ifp->if_nextents > 0))
		return 0;

	for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
		error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
		if (error)
			return error;
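		/* Stop scanning the remaining AGs once corruption is found. */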
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
	}

	return 0;
}

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xchk_bmap(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	struct xfs_bmbt_irec	irec;
	struct xchk_bmap_info	info = { NULL };
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t		endoff;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	/* Non-existent forks can be ignored. */
	if (!ifp)
		goto out;

	info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
	info.whichfork = whichfork;
	info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
	info.sc = sc;

	switch (whichfork) {
	case XFS_COW_FORK:
		/* No CoW forks on non-reflink inodes/filesystems. */
		if (!xfs_is_reflink_inode(ip)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			goto out;
		}
		break;
	case XFS_ATTR_FORK:
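		/*
		 * An attr fork should only exist if one of the attr
		 * feature bits is set in the superblock.
		 */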
		if (!xfs_sb_version_hasattr(&mp->m_sb) &&
		    !xfs_sb_version_hasattr2(&mp->m_sb))
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		break;
	default:
		ASSERT(whichfork == XFS_DATA_FORK);
		break;
	}

	/* Check the fork values */
	switch (ifp->if_format) {
	case XFS_DINODE_FMT_UUID:
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
		/* No mappings to check. */
		goto out;
	case XFS_DINODE_FMT_EXTENTS:
		break;
	case XFS_DINODE_FMT_BTREE:
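		/*
		 * The CoW fork exists only in memory and is always kept in
		 * extents format, so a btree-format CoW fork is corrupt.
		 */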
		if (whichfork == XFS_COW_FORK) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}

		error = xchk_bmap_btree(sc, whichfork, &info);
		if (error)
			goto out;
		break;
	default:
		xchk_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Find the file offset just past the last mapping in this fork. */
	error = xfs_bmap_last_offset(ip, &endoff, whichfork);
	if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Scrub extent records. */
	info.lastoff = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error) ||
		    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			goto out;
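		/*
		 * Delalloc extents are legal in the incore data fork but
		 * have no physical space allocated yet, so there's nothing
		 * to cross-reference; skip them.
		 */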
		if (isnullstartblock(irec.br_startblock))
			continue;
		if (irec.br_startoff >= endoff) {
			xchk_fblock_set_corrupt(sc, whichfork,
					irec.br_startoff);
			goto out;
		}
		error = xchk_bmap_iextent(ip, &info, &irec);
		if (error)
			goto out;
	}

	error = xchk_bmap_check_rmaps(sc, whichfork);
	if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
		goto out;
out:
	return error;
}

/* Scrub an inode's data fork. */
int
xchk_bmap_data(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xchk_bmap_attr(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xchk_bmap_cow(
	struct xfs_scrub	*sc)
{
	if (!xfs_is_reflink_inode(sc->ip))
		return -ENOENT;

	return xchk_bmap(sc, XFS_COW_FORK);
}