xref: /openbmc/linux/fs/xfs/scrub/repair.c (revision 86a464179cef7185ad9e540d51063e7f196e55ba)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_extent_busy.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"

/*
 * Attempt to repair some metadata, if the metadata is corrupt and userspace
 * told us to fix it.  This function returns -EAGAIN to mean "re-run scrub",
 * and will set the XREP_ALREADY_FIXED flag if it thinks it repaired anything.
 */
int
xrep_attempt(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error);

	xchk_ag_btcur_free(&sc->sa);

	/* Repair whatever's broken. */
	ASSERT(sc->ops->repair);
	error = sc->ops->repair(sc);
	trace_xrep_done(XFS_I(file_inode(sc->file)), sc->sm, error);
	switch (error) {
	case 0:
		/*
		 * Repair succeeded.  Commit the fixes and perform a second
		 * scrub so that we can tell userspace if we fixed the problem.
		 */
		sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
		sc->flags |= XREP_ALREADY_FIXED;
		return -EAGAIN;
	case -ECHRNG:
		sc->flags |= XCHK_NEED_DRAIN;
		return -EAGAIN;
	case -EDEADLOCK:
		/* Tell the caller to try again having grabbed all the locks. */
		if (!(sc->flags & XCHK_TRY_HARDER)) {
			sc->flags |= XCHK_TRY_HARDER;
			return -EAGAIN;
		}
		/*
		 * We tried harder but still couldn't grab all the resources
		 * we needed to fix it.  The corruption has not been fixed,
		 * so exit to userspace with the scan's output flags unchanged.
		 */
		return 0;
	default:
		/*
		 * EAGAIN tells the caller to re-scrub, so we cannot return
		 * that here.
		 */
		ASSERT(error != -EAGAIN);
		return error;
	}
}

/*
 * Complain about unfixable problems in the filesystem.  We don't log
 * corruptions when IFLAG_REPAIR wasn't set on the assumption that the driver
 * program is xfs_scrub, which will call back with IFLAG_REPAIR set if the
 * administrator isn't running xfs_scrub in no-repairs mode.
 *
 * Use this helper function because _ratelimited silently declares a static
 * structure to track rate limiting information.
 */
void
xrep_failure(
	struct xfs_mount	*mp)
{
	xfs_alert_ratelimited(mp,
"Corruption not fixed during online repair.  Unmount and run xfs_repair.");
}

/*
 * Repair probe -- userspace uses this to probe if we're willing to repair a
 * given mountpoint.
 */
int
xrep_probe(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	return 0;
}

/*
 * Roll a transaction, keeping the AG headers locked and reinitializing
 * the btree cursors.
 */
int
xrep_roll_ag_trans(
	struct xfs_scrub	*sc)
{
	int			error;

	/*
	 * Keep the AG header buffers locked while we roll the transaction.
	 * Ensure that both AG buffers are dirty and held when we roll the
	 * transaction so that they move forward in the log without losing the
	 * bli (and hence the bli type) when the transaction commits.
	 *
	 * Normal code would never hold clean buffers across a roll, but repair
	 * needs both buffers to maintain a total lock on the AG.
	 */
	if (sc->sa.agi_bp) {
		xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM);
		xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
	}

	if (sc->sa.agf_bp) {
		xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM);
		xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
	}

	/*
	 * Roll the transaction.  We still hold the AG header buffers locked
	 * regardless of whether or not that succeeds.  On failure, the buffers
	 * will be released during teardown on our way out of the kernel.  If
	 * successful, join the buffers to the new transaction and move on.
	 */
	error = xfs_trans_roll(&sc->tp);
	if (error)
		return error;

	/* Join the AG headers to the new transaction. */
	if (sc->sa.agi_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
	if (sc->sa.agf_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);

	return 0;
}

/*
 * Does the given AG have enough space to rebuild a btree?  Neither AG
 * reservation can be critical, and we must have enough space (factoring
 * in AG reservations) to construct a whole btree.
 */
bool
xrep_ag_has_space(
	struct xfs_perag	*pag,
	xfs_extlen_t		nr_blocks,
	enum xfs_ag_resv_type	type)
{
	return  !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&
		!xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&
		pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks;
}
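
/*
 * Worked example (illustrative numbers only, not from any real filesystem):
 * with pagf_freeblks == 1000, a remaining reservation of 200 blocks for
 * @type, and nr_blocks == 850, the check above computes 1000 > 200 + 850,
 * which is false, so we would decline to rebuild the btree in this AG.
 */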

/*
 * Figure out how many blocks to reserve for an AG repair.  We calculate the
 * worst case estimate for the number of blocks we'd need to rebuild one of
 * any type of per-AG btree.
 */
xfs_extlen_t
xrep_calc_ag_resblks(
	struct xfs_scrub		*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_scrub_metadata	*sm = sc->sm;
	struct xfs_perag		*pag;
	struct xfs_buf			*bp;
	xfs_agino_t			icount = NULLAGINO;
	xfs_extlen_t			aglen = NULLAGBLOCK;
	xfs_extlen_t			usedlen;
	xfs_extlen_t			freelen;
	xfs_extlen_t			bnobt_sz;
	xfs_extlen_t			inobt_sz;
	xfs_extlen_t			rmapbt_sz;
	xfs_extlen_t			refcbt_sz;
	int				error;

	if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
		return 0;

	pag = xfs_perag_get(mp, sm->sm_agno);
	if (xfs_perag_initialised_agi(pag)) {
		/* Use in-core icount if possible. */
		icount = pag->pagi_count;
	} else {
		/* Try to get the actual counters from disk. */
		error = xfs_ialloc_read_agi(pag, NULL, &bp);
		if (!error) {
			icount = pag->pagi_count;
			xfs_buf_relse(bp);
		}
	}

	/* Now grab the block counters from the AGF. */
	error = xfs_alloc_read_agf(pag, NULL, 0, &bp);
	if (error) {
		aglen = pag->block_count;
		freelen = aglen;
		usedlen = aglen;
	} else {
		struct xfs_agf	*agf = bp->b_addr;

		aglen = be32_to_cpu(agf->agf_length);
		freelen = be32_to_cpu(agf->agf_freeblks);
		usedlen = aglen - freelen;
		xfs_buf_relse(bp);
	}

	/* If the icount is impossible, make some worst-case assumptions. */
	if (icount == NULLAGINO ||
	    !xfs_verify_agino(pag, icount)) {
		icount = pag->agino_max - pag->agino_min + 1;
	}

	/* If the block counts are impossible, make worst-case assumptions. */
	if (aglen == NULLAGBLOCK ||
	    aglen != pag->block_count ||
	    freelen >= aglen) {
		aglen = pag->block_count;
		freelen = aglen;
		usedlen = aglen;
	}
	xfs_perag_put(pag);

	trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
			freelen, usedlen);

	/*
	 * Figure out how many blocks we'd need worst case to rebuild
	 * each type of btree.  Note that we can only rebuild the
	 * bnobt/cntbt or inobt/finobt as pairs.
	 */
	bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
	if (xfs_has_sparseinodes(mp))
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_HOLEMASK_BIT);
	else
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_CHUNK);
	if (xfs_has_finobt(mp))
		inobt_sz *= 2;
	if (xfs_has_reflink(mp))
		refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
	else
		refcbt_sz = 0;
	if (xfs_has_rmapbt(mp)) {
		/*
		 * Guess how many blocks we need to rebuild the rmapbt.
		 * For non-reflink filesystems we can't have more records than
		 * used blocks.  However, with reflink it's possible to have
		 * more than one rmap record per AG block.  We don't know how
		 * many rmaps there could be in the AG, so we start off with
		 * what we hope is a generous over-estimation.
		 */
		if (xfs_has_reflink(mp))
			rmapbt_sz = xfs_rmapbt_calc_size(mp,
					(unsigned long long)aglen * 2);
		else
			rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
	} else {
		rmapbt_sz = 0;
	}

	trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
			inobt_sz, rmapbt_sz, refcbt_sz);

	return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}
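
/*
 * Worked example (made-up sizes): if the worst-case estimates above came out
 * to bnobt_sz = 400, inobt_sz = 200, rmapbt_sz = 1000, and refcbt_sz = 300
 * blocks, the max() above yields 1000, which is enough to rebuild whichever
 * single per-AG btree actually turns out to need repair.
 */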

/*
 * Reconstructing per-AG Btrees
 *
 * When a space btree is corrupt, we don't bother trying to fix it.  Instead,
 * we scan secondary space metadata to derive the records that should be in
 * the damaged btree, initialize a fresh btree root, and insert the records.
 * Note that for rebuilding the rmapbt we scan all the primary data to
 * generate the new records.
 *
 * However, that leaves the matter of removing all the metadata describing the
 * old broken structure.  For primary metadata we use the rmap data to collect
 * every extent with a matching rmap owner (bitmap); we then iterate all other
 * metadata structures with the same rmap owner to collect the extents that
 * cannot be removed (sublist).  We then subtract sublist from bitmap to
 * derive the blocks that were used by the old btree.  These blocks can be
 * reaped.
 *
 * For rmapbt reconstructions we must use different tactics for extent
 * collection.  First we iterate all primary metadata (this excludes the old
 * rmapbt, obviously) to generate new rmap records.  The gaps in the rmap
 * records are collected as bitmap.  The bnobt records are collected as
 * sublist.  As with the other btrees we subtract sublist from bitmap, and the
 * result (since the rmapbt lives in the free space) are the blocks from the
 * old rmapbt.
 *
 * Disposal of Blocks from Old per-AG Btrees
 *
 * Now that we've constructed a new btree to replace the damaged one, we want
 * to dispose of the blocks that (we think) the old btree was using.
 * Previously, we used the rmapbt to collect the extents (bitmap) with the
 * rmap owner corresponding to the tree we rebuilt, collected extents for any
 * blocks with the same rmap owner that are owned by another data structure
 * (sublist), and subtracted sublist from bitmap.  In theory the extents
 * remaining in bitmap are the old btree's blocks.
 *
 * Unfortunately, it's possible that the btree was crosslinked with other
 * blocks on disk.  The rmap data can tell us if there are multiple owners, so
 * if the rmapbt says there is an owner of this block other than @oinfo, then
 * the block is crosslinked.  Remove the reverse mapping and continue.
 *
 * If there is one rmap record, we can free the block, which removes the
 * reverse mapping but doesn't add the block to the free space.  Our repair
 * strategy is to hope the other metadata objects crosslinked on this block
 * will be rebuilt (atop different blocks), thereby removing all the cross
 * links.
 *
 * If there are no rmap records at all, we also free the block.  If the btree
 * being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't
 * supposed to be an rmap record and everything is ok.  For other btrees there
 * had to have been an rmap entry for the block to have ended up on @bitmap,
 * so if it's gone now there's something wrong and the fs will shut down.
 *
 * Note: If there are multiple rmap records with only the same rmap owner as
 * the btree we're trying to rebuild and the block is indeed owned by another
 * data structure with the same rmap owner, then the block will be in sublist
 * and therefore doesn't need disposal.  If there are multiple rmap records
 * with only the same rmap owner but the block is not owned by something with
 * the same rmap owner, the block will be freed.
 *
 * The caller is responsible for locking the AG headers for the entire rebuild
 * operation so that nothing else can sneak in and change the AG state while
 * we're not looking.  We also assume that the caller already invalidated any
 * buffers associated with @bitmap.
 */
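
/*
 * Worked example of the subtraction described above (illustrative block
 * numbers only): suppose we are rebuilding the inobt.  The rmapbt reports
 * that blocks 10-15 and 20-23 carry the XFS_RMAP_OWN_INOBT owner, so those
 * extents become bitmap.  Walking the still-intact finobt (which shares that
 * rmap owner) shows that it occupies blocks 20-23, which become sublist.
 * bitmap minus sublist leaves blocks 10-15, which must have belonged to the
 * old inobt and are therefore the blocks we hand to the reaping code below.
 */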

static int
xrep_invalidate_block(
	uint64_t		fsbno,
	void			*priv)
{
	struct xfs_scrub	*sc = priv;
	struct xfs_buf		*bp;
	int			error;

	/* Skip AG headers and post-EOFS blocks */
	if (!xfs_verify_fsbno(sc->mp, fsbno))
		return 0;

	error = xfs_buf_incore(sc->mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(sc->mp, fsbno),
			XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp);
	if (error)
		return 0;

	xfs_trans_bjoin(sc->tp, bp);
	xfs_trans_binval(sc->tp, bp);
	return 0;
}

/*
 * Invalidate buffers for per-AG btree blocks we're dumping.  This function
 * is not intended for use with file data repairs; we have bunmapi for that.
 */
int
xrep_invalidate_blocks(
	struct xfs_scrub	*sc,
	struct xbitmap		*bitmap)
{
	/*
	 * For each block in each extent, see if there's an incore buffer for
	 * exactly that block; if so, invalidate it.  The buffer cache only
	 * lets us look for one buffer at a time, so we have to look one block
	 * at a time.  Avoid invalidating AG headers and post-EOFS blocks
	 * because we never own those; and if we can't TRYLOCK the buffer we
	 * assume it's owned by someone else.
	 */
	return xbitmap_walk_bits(bitmap, xrep_invalidate_block, sc);
}

/* Ensure the freelist is the correct size. */
int
xrep_fix_freelist(
	struct xfs_scrub	*sc,
	bool			can_shrink)
{
	struct xfs_alloc_arg	args = {0};

	args.mp = sc->mp;
	args.tp = sc->tp;
	args.agno = sc->sa.pag->pag_agno;
	args.alignment = 1;
	args.pag = sc->sa.pag;

	return xfs_alloc_fix_freelist(&args,
			can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK);
}

/* Information about reaping extents after a repair. */
struct xrep_reap_state {
	struct xfs_scrub		*sc;

	/* Reverse mapping owner and metadata reservation type. */
	const struct xfs_owner_info	*oinfo;
	enum xfs_ag_resv_type		resv;
};

/*
 * Put a block back on the AGFL.
 */
STATIC int
xrep_put_freelist(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	struct xfs_buf		*agfl_bp;
	int			error;

	/* Make sure there's space on the freelist. */
	error = xrep_fix_freelist(sc, true);
	if (error)
		return error;

	/*
	 * Since we're "freeing" a lost block onto the AGFL, we have to
	 * create an rmap for the block prior to merging it or else other
	 * parts will break.
	 */
	error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1,
			&XFS_RMAP_OINFO_AG);
	if (error)
		return error;

	/* Put the block on the AGFL. */
	error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
	if (error)
		return error;

	error = xfs_alloc_put_freelist(sc->sa.pag, sc->tp, sc->sa.agf_bp,
			agfl_bp, agbno, 0);
	if (error)
		return error;
	xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1,
			XFS_EXTENT_BUSY_SKIP_DISCARD);

	return 0;
}

/* Dispose of a single block. */
STATIC int
xrep_reap_block(
	uint64_t			fsbno,
	void				*priv)
{
	struct xrep_reap_state		*rs = priv;
	struct xfs_scrub		*sc = rs->sc;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agf_bp = NULL;
	xfs_agblock_t			agbno;
	bool				has_other_rmap;
	int				error;

	ASSERT(sc->ip != NULL ||
	       XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno);
	trace_xrep_dispose_btree_extent(sc->mp,
			XFS_FSB_TO_AGNO(sc->mp, fsbno),
			XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);

	agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
	ASSERT(XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno);

	/*
	 * If we are repairing per-inode metadata, we need to read in the AGF
	 * buffer.  Otherwise, we're repairing a per-AG structure, so reuse
	 * the AGF buffer that the setup functions already grabbed.
	 */
	if (sc->ip) {
		error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp);
		if (error)
			return error;
	} else {
		agf_bp = sc->sa.agf_bp;
	}
	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag);

	/* Can we find any other rmappings? */
	error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo,
			&has_other_rmap);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out_free;

	/*
	 * If there are other rmappings, this block is cross linked and must
	 * not be freed.  Remove the reverse mapping and move on.  Otherwise,
	 * we were the only owner of the block, so free the extent, which will
	 * also remove the rmap.
	 *
	 * XXX: XFS doesn't support detecting the case where a single block
	 * metadata structure is crosslinked with a multi-block structure
	 * because the buffer cache doesn't detect aliasing problems, so we
	 * can't fix 100% of crosslinking problems (yet).  The verifiers will
	 * blow on writeout, the filesystem will shut down, and the admin gets
	 * to run xfs_repair.
	 */
	if (has_other_rmap)
		error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno,
					1, rs->oinfo);
	else if (rs->resv == XFS_AG_RESV_AGFL)
		error = xrep_put_freelist(sc, agbno);
	else
		error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, rs->oinfo,
				rs->resv);
	if (agf_bp != sc->sa.agf_bp)
		xfs_trans_brelse(sc->tp, agf_bp);
	if (error)
		return error;

	if (sc->ip)
		return xfs_trans_roll_inode(&sc->tp, sc->ip);
	return xrep_roll_ag_trans(sc);

out_free:
	if (agf_bp != sc->sa.agf_bp)
		xfs_trans_brelse(sc->tp, agf_bp);
	return error;
}

/* Dispose of every block of every extent in the bitmap. */
int
xrep_reap_extents(
	struct xfs_scrub		*sc,
	struct xbitmap			*bitmap,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		type)
{
	struct xrep_reap_state		rs = {
		.sc			= sc,
		.oinfo			= oinfo,
		.resv			= type,
	};

	ASSERT(xfs_has_rmapbt(sc->mp));

	return xbitmap_walk_bits(bitmap, xrep_reap_block, &rs);
}
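
/*
 * Illustrative usage (a sketch, not code from this file): a repair function
 * that has just rebuilt one of the free space btrees might gather the old
 * tree's blocks in an xbitmap (the name old_allocbt_blocks below is
 * hypothetical) and then dispose of them like so:
 *
 *	error = xrep_reap_extents(sc, &old_allocbt_blocks,
 *			&XFS_RMAP_OINFO_AG, XFS_AG_RESV_NONE);
 */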

/*
 * Finding per-AG Btree Roots for AGF/AGI Reconstruction
 *
 * If the AGF or AGI become slightly corrupted, it may be necessary to rebuild
 * the AG headers by using the rmap data to rummage through the AG looking for
 * btree roots.  This is not guaranteed to work if the AG is heavily damaged
 * or the rmap data are corrupt.
 *
 * The caller of xrep_find_ag_btree_roots must lock the AGF and AGFL
 * buffers if the AGF is being rebuilt; or the AGF and AGI buffers if the
 * AGI is being rebuilt.  It must maintain these locks until it's safe for
 * other threads to change the btrees' shapes.  The caller provides
 * information about the btrees to look for by passing in an array of
 * xrep_find_ag_btree with the (rmap owner, buf_ops, magic) fields set.
 * The (root, height) fields will be set on return if anything is found.  The
 * last element of the array should have a NULL buf_ops to mark the end of the
 * array.
 *
 * For every rmapbt record matching any of the rmap owners in btree_info,
 * read each block referenced by the rmap record.  If the block is a btree
 * block from this filesystem matching any of the magic numbers and has a
 * level higher than what we've already seen, remember the block and the
 * height of the tree required to have such a block.  When the call completes,
 * we return the highest block we've found for each btree description; those
 * should be the roots.
 */
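
/*
 * Worked example of the root hunt (illustrative): while scanning
 * XFS_RMAP_OWN_INOBT blocks for the inobt, we might first hit a level-0
 * leaf, which bumps fab->height to 1; a later level-2 block with no left or
 * right sibling raises fab->height to 3 and records that block in fab->root.
 * If we then find another level-2 block, fab->root is cleared again, because
 * two candidate roots at the same level means we can't trust either one.
 */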

struct xrep_findroot {
	struct xfs_scrub		*sc;
	struct xfs_buf			*agfl_bp;
	struct xfs_agf			*agf;
	struct xrep_find_ag_btree	*btree_info;
};

/* See if our block is in the AGFL. */
STATIC int
xrep_findroot_agfl_walk(
	struct xfs_mount	*mp,
	xfs_agblock_t		bno,
	void			*priv)
{
	xfs_agblock_t		*agbno = priv;

	return (*agbno == bno) ? -ECANCELED : 0;
}

/* Does this block match the btree information passed in? */
STATIC int
xrep_findroot_block(
	struct xrep_findroot		*ri,
	struct xrep_find_ag_btree	*fab,
	uint64_t			owner,
	xfs_agblock_t			agbno,
	bool				*done_with_block)
{
	struct xfs_mount		*mp = ri->sc->mp;
	struct xfs_buf			*bp;
	struct xfs_btree_block		*btblock;
	xfs_daddr_t			daddr;
	int				block_level;
	int				error = 0;

	daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.pag->pag_agno, agbno);

	/*
	 * Blocks in the AGFL have stale contents that might just happen to
	 * have a matching magic and uuid.  We don't want to pull these blocks
	 * in as part of a tree root, so we have to filter out the AGFL stuff
	 * here.  If the AGFL looks insane we'll just refuse to repair.
	 */
	if (owner == XFS_RMAP_OWN_AG) {
		error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
				xrep_findroot_agfl_walk, &agbno);
		if (error == -ECANCELED)
			return 0;
		if (error)
			return error;
	}

	/*
	 * Read the buffer into memory so that we can see if it's a match for
	 * our btree type.  We have no clue if it is beforehand, and we want to
	 * avoid xfs_trans_read_buf's behavior of dumping the DONE state (which
	 * will cause needless disk reads in subsequent calls to this function)
	 * and logging metadata verifier failures.
	 *
	 * Therefore, pass in NULL buffer ops.  If the buffer was already in
	 * memory from some other caller it will already have b_ops assigned.
	 * If it was in memory from a previous unsuccessful findroot_block
	 * call, the buffer won't have b_ops but it should be clean and ready
	 * for us to try to verify if the read call succeeds.  The same applies
	 * if the buffer wasn't in memory at all.
	 *
	 * Note: If we never match a btree type with this buffer, it will be
	 * left in memory with NULL b_ops.  This shouldn't be a problem unless
	 * the buffer gets written.
	 */
	error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr,
			mp->m_bsize, 0, &bp, NULL);
	if (error)
		return error;

	/* Ensure the block magic matches the btree type we're looking for. */
	btblock = XFS_BUF_TO_BLOCK(bp);
	ASSERT(fab->buf_ops->magic[1] != 0);
	if (btblock->bb_magic != fab->buf_ops->magic[1])
		goto out;

	/*
	 * If the buffer already has ops applied and they're not the ones for
	 * this btree type, we know this block doesn't match the btree and we
	 * can bail out.
	 *
	 * If the buffer ops match ours, someone else has already validated
	 * the block for us, so we can move on to checking if this is a root
	 * block candidate.
	 *
	 * If the buffer does not have ops, nobody has successfully validated
	 * the contents and the buffer cannot be dirty.  If the magic, uuid,
	 * and structure match this btree type then we'll move on to checking
	 * if it's a root block candidate.  If there is no match, bail out.
	 */
	if (bp->b_ops) {
		if (bp->b_ops != fab->buf_ops)
			goto out;
	} else {
		ASSERT(!xfs_trans_buf_is_dirty(bp));
		if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
				&mp->m_sb.sb_meta_uuid))
			goto out;
		/*
		 * Read verifiers can reference b_ops, so we set the pointer
		 * here.  If the verifier fails we'll reset the buffer state
		 * to what it was before we touched the buffer.
		 */
		bp->b_ops = fab->buf_ops;
		fab->buf_ops->verify_read(bp);
		if (bp->b_error) {
			bp->b_ops = NULL;
			bp->b_error = 0;
			goto out;
		}

		/*
		 * Some read verifiers will (re)set b_ops, so we must be
		 * careful not to change b_ops after running the verifier.
		 */
	}

	/*
	 * This block passes the magic/uuid and verifier tests for this btree
	 * type.  We don't need the caller to try the other tree types.
	 */
	*done_with_block = true;

	/*
	 * Compare this btree block's level to the height of the current
	 * candidate root block.
	 *
	 * If the level matches the root we found previously, throw away both
	 * blocks because there can't be two candidate roots.
	 *
	 * If level is lower in the tree than the root we found previously,
	 * ignore this block.
	 */
	block_level = xfs_btree_get_level(btblock);
	if (block_level + 1 == fab->height) {
		fab->root = NULLAGBLOCK;
		goto out;
	} else if (block_level < fab->height) {
		goto out;
	}

	/*
	 * This is the highest block in the tree that we've found so far.
	 * Update the btree height to reflect what we've learned from this
	 * block.
	 */
	fab->height = block_level + 1;

	/*
	 * If this block doesn't have sibling pointers, then it's the new root
	 * block candidate.  Otherwise, the root will be found farther up the
	 * tree.
	 */
	if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) &&
	    btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
		fab->root = agbno;
	else
		fab->root = NULLAGBLOCK;

	trace_xrep_findroot_block(mp, ri->sc->sa.pag->pag_agno, agbno,
			be32_to_cpu(btblock->bb_magic), fab->height - 1);
out:
	xfs_trans_brelse(ri->sc->tp, bp);
	return error;
}

/*
 * Do any of the blocks in this rmap record match one of the btrees we're
 * looking for?
 */
STATIC int
xrep_findroot_rmap(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xrep_findroot		*ri = priv;
	struct xrep_find_ag_btree	*fab;
	xfs_agblock_t			b;
	bool				done;
	int				error = 0;

	/* Ignore anything that isn't AG metadata. */
	if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner))
		return 0;

	/* Otherwise scan each block + btree type. */
	for (b = 0; b < rec->rm_blockcount; b++) {
		done = false;
		for (fab = ri->btree_info; fab->buf_ops; fab++) {
			if (rec->rm_owner != fab->rmap_owner)
				continue;
			error = xrep_findroot_block(ri, fab,
					rec->rm_owner, rec->rm_startblock + b,
					&done);
			if (error)
				return error;
			if (done)
				break;
		}
	}

	return 0;
}

/* Find the roots of the per-AG btrees described in btree_info. */
int
xrep_find_ag_btree_roots(
	struct xfs_scrub		*sc,
	struct xfs_buf			*agf_bp,
	struct xrep_find_ag_btree	*btree_info,
	struct xfs_buf			*agfl_bp)
{
	struct xfs_mount		*mp = sc->mp;
	struct xrep_findroot		ri;
	struct xrep_find_ag_btree	*fab;
	struct xfs_btree_cur		*cur;
	int				error;

	ASSERT(xfs_buf_islocked(agf_bp));
	ASSERT(agfl_bp == NULL || xfs_buf_islocked(agfl_bp));

	ri.sc = sc;
	ri.btree_info = btree_info;
	ri.agf = agf_bp->b_addr;
	ri.agfl_bp = agfl_bp;
	for (fab = btree_info; fab->buf_ops; fab++) {
		ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG);
		ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner));
		fab->root = NULLAGBLOCK;
		fab->height = 0;
	}

	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
	error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
	xfs_btree_del_cursor(cur, error);

	return error;
}

/* Force a quotacheck the next time we mount. */
void
xrep_force_quotacheck(
	struct xfs_scrub	*sc,
	xfs_dqtype_t		type)
{
	uint			flag;

	flag = xfs_quota_chkd_flag(type);
	if (!(flag & sc->mp->m_qflags))
		return;

	mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
	sc->mp->m_qflags &= ~flag;
	spin_lock(&sc->mp->m_sb_lock);
	sc->mp->m_sb.sb_qflags &= ~flag;
	spin_unlock(&sc->mp->m_sb_lock);
	xfs_log_sb(sc->tp);
	mutex_unlock(&sc->mp->m_quotainfo->qi_quotaofflock);
}

/*
 * Attach dquots to this inode, or schedule quotacheck to fix them.
 *
 * This function ensures that the appropriate dquots are attached to an inode.
 * We cannot allow the dquot code to allocate an on-disk dquot block here
 * because we're already in transaction context with the inode locked.  The
 * on-disk dquot should already exist anyway.  If the quota code signals
 * corruption or missing quota information, schedule quotacheck, which will
 * repair corruptions in the quota metadata.
 */
int
xrep_ino_dqattach(
	struct xfs_scrub	*sc)
{
	int			error;

	error = xfs_qm_dqattach_locked(sc->ip, false);
	switch (error) {
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -ENOENT:
		xfs_err_ratelimited(sc->mp,
"inode %llu repair encountered quota error %d, quotacheck forced.",
				(unsigned long long)sc->ip->i_ino, error);
		if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_USER);
		if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_GROUP);
		if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_PROJ);
		fallthrough;
	case -ESRCH:
		error = 0;
		break;
	default:
		break;
	}

	return error;
}