xref: /openbmc/linux/fs/xfs/scrub/common.c (revision e0f6d1a5)
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_itable.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/btree.h"

/* Common code for the metadata scrubbers. */

/*
 * Handling operational errors.
 *
 * The *_process_error() family of functions are used to process error return
 * codes from functions called as part of a scrub operation.
 *
 * If there's no error, we return true to tell the caller that it's ok
 * to move on to the next check in its list.
 *
 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 * caller that something bad happened, and we preserve *error so that
 * the caller can return the *error up the stack to userspace.
 *
 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 * OFLAG_CORRUPT in sm_flags and the *error is cleared.  In other words,
 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 * not via return codes.  We return false to tell the caller that
 * something bad happened.  Since the error has been cleared, the caller
 * will (presumably) return that zero and scrubbing will move on to
 * whatever's next.
 *
 * ftrace can be used to record the precise metadata location and the
 * approximate code location of the failed operation.
 */

/* Check for operational errors. */
static bool
__xfs_scrub_process_error(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	xfs_agblock_t			bno,
	int				*error,
	__u32				errflag,
	void				*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		/* fall through */
	default:
		trace_xfs_scrub_op_error(sc, agno, bno, *error,
				ret_ip);
		break;
	}
	return false;
}

bool
xfs_scrub_process_error(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	xfs_agblock_t			bno,
	int				*error)
{
	return __xfs_scrub_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xfs_scrub_xref_process_error(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	xfs_agblock_t			bno,
	int				*error)
{
	return __xfs_scrub_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

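/*
 * Example (editor's sketch, not part of the original file): a scrubber
 * typically wraps each fallible call in one of the helpers above so
 * that verifier failures mark the metadata corrupt without aborting
 * the scan.  Assuming a bnobt lookup during an AG check:
 *
 *	error = xfs_alloc_lookup_le(sc->sa.bno_cur, agbno, len, &has_rec);
 *	if (!xfs_scrub_process_error(sc, agno, agbno, &error))
 *		return error;
 *
 * On EFSBADCRC/EFSCORRUPTED, *error has been cleared and OFLAG_CORRUPT
 * set, so the zero return lets the scan continue to the next check.
 */
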
/* Check for operational errors for a file offset. */
static bool
__xfs_scrub_fblock_process_error(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset,
	int				*error,
	__u32				errflag,
	void				*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		/* fall through */
	default:
		trace_xfs_scrub_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		break;
	}
	return false;
}

bool
xfs_scrub_fblock_process_error(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset,
	int				*error)
{
	return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xfs_scrub_fblock_xref_process_error(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset,
	int				*error)
{
	return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/*
 * Handling scrub corruption/optimization/warning checks.
 *
 * The *_set_{corrupt,preen,warning}() family of functions are used to
 * record the presence of metadata that is incorrect (corrupt), could be
 * optimized somehow (preen), or should be flagged for administrative
 * review but is not incorrect (warn).
 *
 * ftrace can be used to record the precise metadata location and
 * approximate code location of the failed check.
 */

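/*
 * Example (editor's sketch): an AGF scrubber might flag a corrupt
 * header field like this; the specific check shown is illustrative:
 *
 *	if (be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))
 *		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
 */
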
/* Record a block which could be optimized. */
void
xfs_scrub_block_set_preen(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xfs_scrub_block_preen(sc, bp->b_bn, __return_address);
}

/* Record an inode which could be optimized. */
void
xfs_scrub_ino_set_preen(
	struct xfs_scrub_context	*sc,
	xfs_ino_t			ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xfs_scrub_ino_preen(sc, ino, __return_address);
}

/* Record a corrupt block. */
void
xfs_scrub_block_set_corrupt(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
}

/* Record a corruption while cross-referencing. */
void
xfs_scrub_block_xref_set_corrupt(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
}

/* Record a corrupt inode. */
void
xfs_scrub_ino_set_corrupt(
	struct xfs_scrub_context	*sc,
	xfs_ino_t			ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_ino_error(sc, ino, __return_address);
}

/* Record a corruption while cross-referencing with an inode. */
void
xfs_scrub_ino_xref_set_corrupt(
	struct xfs_scrub_context	*sc,
	xfs_ino_t			ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xfs_scrub_ino_error(sc, ino, __return_address);
}

/* Record corruption in a block indexed by a file fork. */
void
xfs_scrub_fblock_set_corrupt(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
}

/* Record a corruption while cross-referencing a fork block. */
void
xfs_scrub_fblock_xref_set_corrupt(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
}

/*
 * Warn about inodes that need administrative review but are not
 * incorrect.
 */
void
xfs_scrub_ino_set_warning(
	struct xfs_scrub_context	*sc,
	xfs_ino_t			ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xfs_scrub_ino_warning(sc, ino, __return_address);
}

/* Warn about a block indexed by a file fork that needs review. */
void
xfs_scrub_fblock_set_warning(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xfs_scrub_fblock_warning(sc, whichfork, offset, __return_address);
}

/* Signal an incomplete scrub. */
void
xfs_scrub_set_incomplete(
	struct xfs_scrub_context	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
	trace_xfs_scrub_incomplete(sc, __return_address);
}

/*
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */

struct xfs_scrub_rmap_ownedby_info {
	struct xfs_owner_info	*oinfo;
	xfs_filblks_t		*blocks;
};

STATIC int
xfs_scrub_count_rmap_ownedby_irec(
	struct xfs_btree_cur			*cur,
	struct xfs_rmap_irec			*rec,
	void					*priv)
{
	struct xfs_scrub_rmap_ownedby_info	*sroi = priv;
	bool					irec_attr;
	bool					oinfo_attr;

	irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
	oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

	if (rec->rm_owner != sroi->oinfo->oi_owner)
		return 0;

	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
		(*sroi->blocks) += rec->rm_blockcount;

	return 0;
}

/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.
 */
int
xfs_scrub_count_rmap_ownedby_ag(
	struct xfs_scrub_context		*sc,
	struct xfs_btree_cur			*cur,
	struct xfs_owner_info			*oinfo,
	xfs_filblks_t				*blocks)
{
	struct xfs_scrub_rmap_ownedby_info	sroi;

	sroi.oinfo = oinfo;
	*blocks = 0;
	sroi.blocks = blocks;

	return xfs_rmap_query_all(cur, xfs_scrub_count_rmap_ownedby_irec,
			&sroi);
}

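/*
 * Example (editor's sketch): counting the blocks that the rmapbt says
 * belong to the AG's own btrees.  This assumes xfs_rmap_ag_owner() is
 * available to fill out the owner info:
 *
 *	struct xfs_owner_info	oinfo;
 *	xfs_filblks_t		blocks;
 *
 *	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
 *	error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
 *			&oinfo, &blocks);
 */
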
/*
 * AG scrubbing
 *
 * These helpers facilitate locking an allocation group's header
 * buffers, setting up cursors for all btrees that are present, and
 * cleaning everything up once we're through.
 */

/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
	struct xfs_scrub_context	*sc,
	unsigned int			type)
{
	/* Return all AG header read failures when scanning btrees. */
	if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
		return true;
	/*
	 * If we're scanning a given type of AG header, we only want to
	 * see read failures from that specific header.  We'd like the
	 * other headers to cross-check them, but this isn't required.
	 */
	if (sc->sm->sm_type == type)
		return true;
	return false;
}

/*
 * Grab all the headers for an AG.
 *
 * The headers should be released by xfs_scrub_ag_free, but as a
 * failsafe we attach all the buffers we grab to the scrub transaction
 * so they'll all be freed when we cancel it.
 */
int
xfs_scrub_ag_read_headers(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	struct xfs_buf			**agi,
	struct xfs_buf			**agf,
	struct xfs_buf			**agfl)
{
	struct xfs_mount		*mp = sc->mp;
	int				error;

	error = xfs_ialloc_read_agi(mp, sc->tp, agno, agi);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		goto out;

	error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, agf);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		goto out;

	error = xfs_alloc_read_agfl(mp, sc->tp, agno, agfl);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
		goto out;
	error = 0;
out:
	return error;
}

/* Release all the AG btree cursors. */
void
xfs_scrub_ag_btcur_free(
	struct xfs_scrub_ag		*sa)
{
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)
		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
	if (sa->fino_cur)
		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
	if (sa->ino_cur)
		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
	if (sa->cnt_cur)
		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
	if (sa->bno_cur)
		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

	sa->refc_cur = NULL;
	sa->rmap_cur = NULL;
	sa->fino_cur = NULL;
	sa->ino_cur = NULL;
	sa->bno_cur = NULL;
	sa->cnt_cur = NULL;
}

/* Initialize all the btree cursors for an AG. */
int
xfs_scrub_ag_btcur_init(
	struct xfs_scrub_context	*sc,
	struct xfs_scrub_ag		*sa)
{
	struct xfs_mount		*mp = sc->mp;
	xfs_agnumber_t			agno = sa->agno;

	if (sa->agf_bp) {
		/* Set up a bnobt cursor for cross-referencing. */
		sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
				agno, XFS_BTNUM_BNO);
		if (!sa->bno_cur)
			goto err;

		/* Set up a cntbt cursor for cross-referencing. */
		sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
				agno, XFS_BTNUM_CNT);
		if (!sa->cnt_cur)
			goto err;
	}

	/* Set up an inobt cursor for cross-referencing. */
	if (sa->agi_bp) {
		sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
					agno, XFS_BTNUM_INO);
		if (!sa->ino_cur)
			goto err;
	}

	/* Set up a finobt cursor for cross-referencing. */
	if (sa->agi_bp && xfs_sb_version_hasfinobt(&mp->m_sb)) {
		sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
				agno, XFS_BTNUM_FINO);
		if (!sa->fino_cur)
			goto err;
	}

	/* Set up a rmapbt cursor for cross-referencing. */
	if (sa->agf_bp && xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
				agno);
		if (!sa->rmap_cur)
			goto err;
	}

	/* Set up a refcountbt cursor for cross-referencing. */
	if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb)) {
		sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
				sa->agf_bp, agno, NULL);
		if (!sa->refc_cur)
			goto err;
	}

	return 0;
err:
	return -ENOMEM;
}

/* Release the AG header context and btree cursors. */
void
xfs_scrub_ag_free(
	struct xfs_scrub_context	*sc,
	struct xfs_scrub_ag		*sa)
{
	xfs_scrub_ag_btcur_free(sa);
	if (sa->agfl_bp) {
		xfs_trans_brelse(sc->tp, sa->agfl_bp);
		sa->agfl_bp = NULL;
	}
	if (sa->agf_bp) {
		xfs_trans_brelse(sc->tp, sa->agf_bp);
		sa->agf_bp = NULL;
	}
	if (sa->agi_bp) {
		xfs_trans_brelse(sc->tp, sa->agi_bp);
		sa->agi_bp = NULL;
	}
	sa->agno = NULLAGNUMBER;
}

/*
 * For scrub, grab the AGI and the AGF headers, in that order.  Locking
 * order requires us to get the AGI before the AGF.  We use the
 * transaction to avoid deadlocking on crosslinked metadata buffers;
 * either the caller passes one in (bmap scrub) or we have to create a
 * transaction ourselves.
 */
int
xfs_scrub_ag_init(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	struct xfs_scrub_ag		*sa)
{
	int				error;

	sa->agno = agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sa->agi_bp,
			&sa->agf_bp, &sa->agfl_bp);
	if (error)
		return error;

	return xfs_scrub_ag_btcur_init(sc, sa);
}

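/*
 * Example (editor's sketch): the usual lifecycle for an AG scrubber is
 * to set up the context, cross-reference against the cursors, and then
 * release everything:
 *
 *	error = xfs_scrub_ag_init(sc, agno, &sc->sa);
 *	if (!xfs_scrub_process_error(sc, agno, 0, &error))
 *		return error;
 *	(check metadata against sc->sa.bno_cur, sc->sa.rmap_cur, etc.)
 *	xfs_scrub_ag_free(sc, &sc->sa);
 */
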
/* Per-scrubber setup functions */

/* Set us up with a transaction and an empty context. */
int
xfs_scrub_setup_fs(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip)
{
	return xfs_scrub_trans_alloc(sc->sm, sc->mp, &sc->tp);
}

/* Set us up with AG headers and btree cursors. */
int
xfs_scrub_setup_ag_btree(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip,
	bool				force_log)
{
	struct xfs_mount		*mp = sc->mp;
	int				error;

	/*
	 * If the caller asks us to checkpoint the log, do so.  This
	 * expensive operation should be performed infrequently and only
	 * as a last resort.  Any caller that sets force_log should
	 * document why they need to do so.
	 */
	if (force_log) {
		error = xfs_scrub_checkpoint_log(mp);
		if (error)
			return error;
	}

	error = xfs_scrub_setup_fs(sc, ip);
	if (error)
		return error;

	return xfs_scrub_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}

/* Push everything out of the log onto disk. */
int
xfs_scrub_checkpoint_log(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all_sync(mp->m_ail);
	return 0;
}

/*
 * Given an inode and the scrub control structure, grab either the
 * inode referenced in the control structure or the inode passed in.
 * The inode is not locked.
 */
int
xfs_scrub_get_inode(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip_in)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_inode		*ip = NULL;
	int				error;

	/* We want to scan the inode we already had opened. */
	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
		sc->ip = ip_in;
		return 0;
	}

	/* Look up the inode, see if the generation number matches. */
	if (xfs_internal_inum(mp, sc->sm->sm_ino))
		return -ENOENT;
	error = xfs_iget(mp, NULL, sc->sm->sm_ino,
			XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip);
	switch (error) {
	case -ENOENT:
		/* Inode doesn't exist, just bail out. */
		return error;
	case 0:
		/* Got an inode, continue. */
		break;
	case -EINVAL:
		/*
		 * -EINVAL with IGET_UNTRUSTED could mean one of several
		 * things: userspace gave us an inode number that doesn't
		 * correspond to fs space, or doesn't have an inobt entry;
		 * or it could simply mean that the inode buffer failed the
		 * read verifiers.
		 *
		 * Try just the inode mapping lookup -- if it succeeds, then
		 * the inode buffer verifier failed and something needs fixing.
		 * Otherwise, we really couldn't find it so tell userspace
		 * that it no longer exists.
		 */
		error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap,
				XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE);
		if (error)
			return -ENOENT;
		error = -EFSCORRUPTED;
		/* fall through */
	default:
		trace_xfs_scrub_op_error(sc,
				XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
				XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
				error, __return_address);
		return error;
	}
	if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
		iput(VFS_I(ip));
		return -ENOENT;
	}

	sc->ip = ip;
	return 0;
}

/* Set us up to scrub a file's contents. */
int
xfs_scrub_setup_inode_contents(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip,
	unsigned int			resblks)
{
	struct xfs_mount		*mp = sc->mp;
	int				error;

	error = xfs_scrub_get_inode(sc, ip);
	if (error)
		return error;

	/* Got the inode, lock it and we're ready to go. */
	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
	error = xfs_scrub_trans_alloc(sc->sm, mp, &sc->tp);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}

/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 */
bool
xfs_scrub_should_check_xref(
	struct xfs_scrub_context	*sc,
	int				*error,
	struct xfs_btree_cur		**curpp)
{
	if (*error == 0)
		return true;

	if (curpp) {
		/* If we've already given up on xref, just bail out. */
		if (!*curpp)
			return false;

		/* xref error, delete cursor and bail out. */
		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
		*curpp = NULL;
	}

	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
	trace_xfs_scrub_xref_error(sc, *error, __return_address);

	/*
	 * Errors encountered during cross-referencing with another
	 * data structure should not cause this scrubber to abort.
	 */
	*error = 0;
	return false;
}

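/*
 * Example (editor's sketch): a block scrubber cross-referencing its
 * target block against the rmapbt.  This assumes xfs_rmap_record_exists()
 * is available for the query:
 *
 *	error = xfs_rmap_record_exists(sc->sa.rmap_cur, agbno, 1, oinfo,
 *			&has_rmap);
 *	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 *	if (!has_rmap)
 *		xfs_scrub_block_xref_set_corrupt(sc, bp);
 */
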
/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xfs_scrub_buffer_recheck(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	xfs_failaddr_t			fa;

	if (bp->b_ops == NULL) {
		xfs_scrub_block_set_corrupt(sc, bp);
		return;
	}
	if (bp->b_ops->verify_struct == NULL) {
		xfs_scrub_set_incomplete(sc);
		return;
	}
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return;
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_block_error(sc, bp->b_bn, fa);
}
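
/*
 * Example (editor's sketch): an AG header scrubber that holds the other
 * headers can re-verify their in-memory contents, e.g.:
 *
 *	if (sc->sa.agi_bp)
 *		xfs_scrub_buffer_recheck(sc, sc->sa.agi_bp);
 */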