xref: /openbmc/linux/fs/xfs/scrub/repair.c (revision b634abac59acc0e4397c5cf420278b32a6e0b69e)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_extent_busy.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"

/*
 * Attempt to repair some metadata, if the metadata is corrupt and userspace
 * told us to fix it.  This function returns -EAGAIN to mean "re-run scrub",
 * and sets the XREP_ALREADY_FIXED flag in sc->flags if it thinks it repaired
 * anything.
 */
int
xrep_attempt(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error);

	xchk_ag_btcur_free(&sc->sa);

	/* Repair whatever's broken. */
	ASSERT(sc->ops->repair);
	error = sc->ops->repair(sc);
	trace_xrep_done(XFS_I(file_inode(sc->file)), sc->sm, error);
	switch (error) {
	case 0:
		/*
		 * Repair succeeded.  Commit the fixes and perform a second
		 * scrub so that we can tell userspace if we fixed the problem.
		 */
		sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
		sc->flags |= XREP_ALREADY_FIXED;
		return -EAGAIN;
	case -ECHRNG:
		sc->flags |= XCHK_NEED_DRAIN;
		return -EAGAIN;
	case -EDEADLOCK:
		/* Tell the caller to try again having grabbed all the locks. */
		if (!(sc->flags & XCHK_TRY_HARDER)) {
			sc->flags |= XCHK_TRY_HARDER;
			return -EAGAIN;
		}
		/*
		 * We tried harder but still couldn't grab all the resources
		 * we needed to fix it.  The corruption has not been fixed,
		 * so exit to userspace with the scan's output flags unchanged.
		 */
		return 0;
	default:
		/*
		 * EAGAIN tells the caller to re-scrub, so we cannot return
		 * that here.
		 */
		ASSERT(error != -EAGAIN);
		return error;
	}
}

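/*
 * Illustrative sketch (not part of the kernel): one way a driver could
 * consume the -EAGAIN protocol above.  The helper names here are
 * hypothetical; the real driver loop lives in xfs_scrub_metadata() in
 * scrub/scrub.c.
 *
 *	for (;;) {
 *		error = set_up_and_run_scrub(sc);
 *		if (error || !repair_is_wanted(sc))
 *			break;
 *		error = xrep_attempt(sc);
 *		if (error != -EAGAIN)
 *			break;
 *		tear_down_scrub_resources(sc);
 *	}
 *
 * Every -EAGAIN from xrep_attempt() means "tear down, then redo the scrub",
 * possibly with XCHK_TRY_HARDER or XCHK_NEED_DRAIN newly set in sc->flags.
 */
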
/*
 * Complain about unfixable problems in the filesystem.  We don't log
 * corruptions when IFLAG_REPAIR wasn't set on the assumption that the driver
 * program is xfs_scrub, which will call back with IFLAG_REPAIR set if the
 * administrator isn't running xfs_scrub in no-repairs mode.
 *
 * Use this helper function because _ratelimited silently declares a static
 * structure to track rate limiting information.
 */
void
xrep_failure(
	struct xfs_mount	*mp)
{
	xfs_alert_ratelimited(mp,
"Corruption not fixed during online repair.  Unmount and run xfs_repair.");
}

/*
 * Repair probe -- userspace uses this to probe if we're willing to repair a
 * given mountpoint.
 */
int
xrep_probe(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	return 0;
}

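/*
 * Illustrative sketch (not part of the kernel): userspace can probe for
 * repair support through the scrub ioctl, roughly like this, using a file
 * descriptor opened anywhere on the target filesystem:
 *
 *	struct xfs_scrub_metadata sm = {
 *		.sm_type	= XFS_SCRUB_TYPE_PROBE,
 *		.sm_flags	= XFS_SCRUB_IFLAG_REPAIR,
 *	};
 *
 *	error = ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm);
 *
 * If the kernel was not built with CONFIG_XFS_ONLINE_REPAIR, a probe that
 * asks for repair typically fails with EOPNOTSUPP; success means repair
 * requests will be accepted for this mountpoint.
 */
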
/*
 * Roll a transaction, keeping the AG headers locked and reinitializing
 * the btree cursors.
 */
int
xrep_roll_ag_trans(
	struct xfs_scrub	*sc)
{
	int			error;

	/*
	 * Keep the AG header buffers locked while we roll the transaction.
	 * Ensure that both AG buffers are dirty and held when we roll the
	 * transaction so that they move forward in the log without losing the
	 * bli (and hence the bli type) when the transaction commits.
	 *
	 * Normal code would never hold clean buffers across a roll, but repair
	 * needs both buffers to maintain a total lock on the AG.
	 */
	if (sc->sa.agi_bp) {
		xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM);
		xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
	}

	if (sc->sa.agf_bp) {
		xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM);
		xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
	}

	/*
	 * Roll the transaction.  We still hold the AG header buffers locked
	 * regardless of whether or not that succeeds.  On failure, the buffers
	 * will be released during teardown on our way out of the kernel.  If
	 * successful, join the buffers to the new transaction and move on.
	 */
	error = xfs_trans_roll(&sc->tp);
	if (error)
		return error;

	/* Join the AG headers to the new transaction. */
	if (sc->sa.agi_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
	if (sc->sa.agf_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);

	return 0;
}

/*
 * Does the given AG have enough space to rebuild a btree?  Neither AG
 * reservation can be critical, and we must have enough space (factoring
 * in AG reservations) to construct a whole btree.
 */
bool
xrep_ag_has_space(
	struct xfs_perag	*pag,
	xfs_extlen_t		nr_blocks,
	enum xfs_ag_resv_type	type)
{
	return  !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&
		!xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&
		pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks;
}

/*
 * Figure out how many blocks to reserve for an AG repair.  We calculate the
 * worst case estimate for the number of blocks we'd need to rebuild one of
 * any type of per-AG btree.
 */
xfs_extlen_t
xrep_calc_ag_resblks(
	struct xfs_scrub		*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_scrub_metadata	*sm = sc->sm;
	struct xfs_perag		*pag;
	struct xfs_buf			*bp;
	xfs_agino_t			icount = NULLAGINO;
	xfs_extlen_t			aglen = NULLAGBLOCK;
	xfs_extlen_t			usedlen;
	xfs_extlen_t			freelen;
	xfs_extlen_t			bnobt_sz;
	xfs_extlen_t			inobt_sz;
	xfs_extlen_t			rmapbt_sz;
	xfs_extlen_t			refcbt_sz;
	int				error;

	if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
		return 0;

	pag = xfs_perag_get(mp, sm->sm_agno);
	if (xfs_perag_initialised_agi(pag)) {
		/* Use in-core icount if possible. */
		icount = pag->pagi_count;
	} else {
		/* Try to get the actual counters from disk. */
		error = xfs_ialloc_read_agi(pag, NULL, &bp);
		if (!error) {
			icount = pag->pagi_count;
			xfs_buf_relse(bp);
		}
	}

	/* Now grab the block counters from the AGF. */
	error = xfs_alloc_read_agf(pag, NULL, 0, &bp);
	if (error) {
		aglen = pag->block_count;
		freelen = aglen;
		usedlen = aglen;
	} else {
		struct xfs_agf	*agf = bp->b_addr;

		aglen = be32_to_cpu(agf->agf_length);
		freelen = be32_to_cpu(agf->agf_freeblks);
		usedlen = aglen - freelen;
		xfs_buf_relse(bp);
	}

	/* If the icount is impossible, make some worst-case assumptions. */
	if (icount == NULLAGINO ||
	    !xfs_verify_agino(pag, icount)) {
		icount = pag->agino_max - pag->agino_min + 1;
	}

	/* If the block counts are impossible, make worst-case assumptions. */
	if (aglen == NULLAGBLOCK ||
	    aglen != pag->block_count ||
	    freelen >= aglen) {
		aglen = pag->block_count;
		freelen = aglen;
		usedlen = aglen;
	}
	xfs_perag_put(pag);

	trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
			freelen, usedlen);

	/*
	 * Figure out how many blocks we'd need worst case to rebuild
	 * each type of btree.  Note that we can only rebuild the
	 * bnobt/cntbt or inobt/finobt as pairs.
	 */
	bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
	if (xfs_has_sparseinodes(mp))
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_HOLEMASK_BIT);
	else
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_CHUNK);
	if (xfs_has_finobt(mp))
		inobt_sz *= 2;
	if (xfs_has_reflink(mp))
		refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
	else
		refcbt_sz = 0;
	if (xfs_has_rmapbt(mp)) {
		/*
		 * Guess how many blocks we need to rebuild the rmapbt.
		 * For non-reflink filesystems we can't have more records than
		 * used blocks.  However, with reflink it's possible to have
		 * more than one rmap record per AG block.  We don't know how
		 * many rmaps there could be in the AG, so we start off with
		 * what we hope is a generous over-estimation.
		 */
		if (xfs_has_reflink(mp))
			rmapbt_sz = xfs_rmapbt_calc_size(mp,
					(unsigned long long)aglen * 2);
		else
			rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
	} else {
		rmapbt_sz = 0;
	}

	trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
			inobt_sz, rmapbt_sz, refcbt_sz);

	return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}

/* Allocate a block in an AG. */
int
xrep_alloc_ag_block(
	struct xfs_scrub		*sc,
	const struct xfs_owner_info	*oinfo,
	xfs_fsblock_t			*fsbno,
	enum xfs_ag_resv_type		resv)
{
	struct xfs_alloc_arg		args = {0};
	xfs_agblock_t			bno;
	int				error;

	switch (resv) {
	case XFS_AG_RESV_AGFL:
	case XFS_AG_RESV_RMAPBT:
		error = xfs_alloc_get_freelist(sc->sa.pag, sc->tp,
				sc->sa.agf_bp, &bno, 1);
		if (error)
			return error;
		if (bno == NULLAGBLOCK)
			return -ENOSPC;
		xfs_extent_busy_reuse(sc->mp, sc->sa.pag, bno, 1, false);
		*fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno, bno);
		if (resv == XFS_AG_RESV_RMAPBT)
			xfs_ag_resv_rmapbt_alloc(sc->mp, sc->sa.pag->pag_agno);
		return 0;
	default:
		break;
	}

	args.tp = sc->tp;
	args.mp = sc->mp;
	args.pag = sc->sa.pag;
	args.oinfo = *oinfo;
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.resv = resv;

	error = xfs_alloc_vextent_this_ag(&args, sc->sa.pag->pag_agno);
	if (error)
		return error;
	if (args.fsbno == NULLFSBLOCK)
		return -ENOSPC;
	ASSERT(args.len == 1);
	*fsbno = args.fsbno;

	return 0;
}

/* Initialize a new AG btree root block with zero entries. */
int
xrep_init_btblock(
	struct xfs_scrub		*sc,
	xfs_fsblock_t			fsb,
	struct xfs_buf			**bpp,
	xfs_btnum_t			btnum,
	const struct xfs_buf_ops	*ops)
{
	struct xfs_trans		*tp = sc->tp;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*bp;
	int				error;

	trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
			XFS_FSB_TO_AGBNO(mp, fsb), btnum);

	ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.pag->pag_agno);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, fsb), XFS_FSB_TO_BB(mp, 1), 0,
			&bp);
	if (error)
		return error;
	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.pag->pag_agno);
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
	bp->b_ops = ops;
	*bpp = bp;

	return 0;
}

/*
 * Reconstructing per-AG Btrees
 *
 * When a space btree is corrupt, we don't bother trying to fix it.  Instead,
 * we scan secondary space metadata to derive the records that should be in
 * the damaged btree, initialize a fresh btree root, and insert the records.
 * Note that for rebuilding the rmapbt we scan all the primary data to
 * generate the new records.
 *
 * However, that leaves the matter of removing all the metadata describing the
 * old broken structure.  For primary metadata we use the rmap data to collect
 * every extent with a matching rmap owner (bitmap); we then iterate all other
 * metadata structures with the same rmap owner to collect the extents that
 * cannot be removed (sublist).  We then subtract sublist from bitmap to
 * derive the blocks that were used by the old btree.  These blocks can be
 * reaped.
 *
 * For rmapbt reconstructions we must use different tactics for extent
 * collection.  First we iterate all primary metadata (this excludes the old
 * rmapbt, obviously) to generate new rmap records.  The gaps in the rmap
 * records are collected as bitmap.  The bnobt records are collected as
 * sublist.  As with the other btrees we subtract sublist from bitmap, and the
 * result (since the rmapbt lives in the free space) is the set of blocks from
 * the old rmapbt.
 *
 * Disposal of Blocks from Old per-AG Btrees
 *
 * Now that we've constructed a new btree to replace the damaged one, we want
 * to dispose of the blocks that (we think) the old btree was using.
 * Previously, we used the rmapbt to collect the extents (bitmap) with the
 * rmap owner corresponding to the tree we rebuilt, collected extents for any
 * blocks with the same rmap owner that are owned by another data structure
 * (sublist), and subtracted sublist from bitmap.  In theory the extents
 * remaining in bitmap are the old btree's blocks.
 *
 * Unfortunately, it's possible that the btree was crosslinked with other
 * blocks on disk.  The rmap data can tell us if there are multiple owners, so
 * if the rmapbt says there is an owner of this block other than @oinfo, then
 * the block is crosslinked.  Remove the reverse mapping and continue.
 *
 * If there is one rmap record, we can free the block, which removes the
 * reverse mapping but doesn't add the block to the free space.  Our repair
 * strategy is to hope the other metadata objects crosslinked on this block
 * will be rebuilt (atop different blocks), thereby removing all the cross
 * links.
 *
 * If there are no rmap records at all, we also free the block.  If the btree
 * being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't
 * supposed to be a rmap record and everything is ok.  For other btrees there
 * had to have been an rmap entry for the block to have ended up on @bitmap,
 * so if it's gone now there's something wrong and the fs will shut down.
 *
 * Note: If there are multiple rmap records with only the same rmap owner as
 * the btree we're trying to rebuild and the block is indeed owned by another
 * data structure with the same rmap owner, then the block will be in sublist
 * and therefore doesn't need disposal.  If there are multiple rmap records
 * with only the same rmap owner but the block is not owned by something with
 * the same rmap owner, the block will be freed.
 *
 * The caller is responsible for locking the AG headers for the entire rebuild
 * operation so that nothing else can sneak in and change the AG state while
 * we're not looking.  We also assume that the caller already invalidated any
 * buffers associated with @bitmap.
 */

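/*
 * Illustrative sketch (not part of the kernel): the disposal strategy above,
 * as rough pseudocode.  The two collect_*() helpers are hypothetical
 * stand-ins for the per-btree repair code; the xbitmap calls refer to the
 * helpers declared in scrub/bitmap.h, and the xrep_*() calls are the
 * functions defined below in this file:
 *
 *	xbitmap_init(&old_blocks);
 *	xbitmap_init(&keep_blocks);
 *	collect_extents_with_rmap_owner(sc, oinfo, &old_blocks);
 *	collect_extents_still_in_use(sc, oinfo, &keep_blocks);
 *	xbitmap_disunion(&old_blocks, &keep_blocks);
 *	xrep_invalidate_blocks(sc, &old_blocks);
 *	xrep_reap_extents(sc, &old_blocks, oinfo, XFS_AG_RESV_NONE);
 *	xbitmap_destroy(&keep_blocks);
 *	xbitmap_destroy(&old_blocks);
 *
 * Error handling is omitted here; the real repair functions bail out and
 * leave the filesystem flagged for offline repair if any step fails.
 */
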
/*
 * Invalidate buffers for per-AG btree blocks we're dumping.  This function
 * is not intended for use with file data repairs; we have bunmapi for that.
 */
int
xrep_invalidate_blocks(
	struct xfs_scrub	*sc,
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	/*
	 * For each block in each extent, see if there's an incore buffer for
	 * exactly that block; if so, invalidate it.  The buffer cache only
	 * lets us look for one buffer at a time, so we have to look one block
	 * at a time.  Avoid invalidating AG headers and post-EOFS blocks
	 * because we never own those; and if we can't TRYLOCK the buffer we
	 * assume it's owned by someone else.
	 */
	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
		int		error;

		/* Skip AG headers and post-EOFS blocks */
		if (!xfs_verify_fsbno(sc->mp, fsbno))
			continue;
		error = xfs_buf_incore(sc->mp->m_ddev_targp,
				XFS_FSB_TO_DADDR(sc->mp, fsbno),
				XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp);
		if (error)
			continue;

		xfs_trans_bjoin(sc->tp, bp);
		xfs_trans_binval(sc->tp, bp);
	}

	return 0;
}

/* Ensure the freelist is the correct size. */
int
xrep_fix_freelist(
	struct xfs_scrub	*sc,
	bool			can_shrink)
{
	struct xfs_alloc_arg	args = {0};

	args.mp = sc->mp;
	args.tp = sc->tp;
	args.agno = sc->sa.pag->pag_agno;
	args.alignment = 1;
	args.pag = sc->sa.pag;

	return xfs_alloc_fix_freelist(&args,
			can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK);
}

/*
 * Put a block back on the AGFL.
 */
STATIC int
xrep_put_freelist(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	struct xfs_buf		*agfl_bp;
	int			error;

	/* Make sure there's space on the freelist. */
	error = xrep_fix_freelist(sc, true);
	if (error)
		return error;

	/*
	 * Since we're "freeing" a lost block onto the AGFL, we have to
	 * create an rmap for the block prior to merging it or else other
	 * parts will break.
	 */
	error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1,
			&XFS_RMAP_OINFO_AG);
	if (error)
		return error;

	/* Put the block on the AGFL. */
	error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
	if (error)
		return error;

	error = xfs_alloc_put_freelist(sc->sa.pag, sc->tp, sc->sa.agf_bp,
			agfl_bp, agbno, 0);
	if (error)
		return error;
	xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1,
			XFS_EXTENT_BUSY_SKIP_DISCARD);

	return 0;
}

/* Dispose of a single block. */
STATIC int
xrep_reap_block(
	struct xfs_scrub		*sc,
	xfs_fsblock_t			fsbno,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		resv)
{
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agf_bp = NULL;
	xfs_agblock_t			agbno;
	bool				has_other_rmap;
	int				error;

	agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
	ASSERT(XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno);

	/*
	 * If we are repairing per-inode metadata, we need to read in the AGF
	 * buffer.  Otherwise, we're repairing a per-AG structure, so reuse
	 * the AGF buffer that the setup functions already grabbed.
	 */
	if (sc->ip) {
		error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp);
		if (error)
			return error;
	} else {
		agf_bp = sc->sa.agf_bp;
	}
	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag);

	/* Can we find any other rmappings? */
	error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out_free;

	/*
	 * If there are other rmappings, this block is cross linked and must
	 * not be freed.  Remove the reverse mapping and move on.  Otherwise,
	 * we were the only owner of the block, so free the extent, which will
	 * also remove the rmap.
	 *
	 * XXX: XFS doesn't support detecting the case where a single block
	 * metadata structure is crosslinked with a multi-block structure
	 * because the buffer cache doesn't detect aliasing problems, so we
	 * can't fix 100% of crosslinking problems (yet).  The verifiers will
	 * blow on writeout, the filesystem will shut down, and the admin gets
	 * to run xfs_repair.
	 */
	if (has_other_rmap)
		error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno,
					1, oinfo);
	else if (resv == XFS_AG_RESV_AGFL)
		error = xrep_put_freelist(sc, agbno);
	else
		error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, oinfo,
				resv);
	if (agf_bp != sc->sa.agf_bp)
		xfs_trans_brelse(sc->tp, agf_bp);
	if (error)
		return error;

	if (sc->ip)
		return xfs_trans_roll_inode(&sc->tp, sc->ip);
	return xrep_roll_ag_trans(sc);

out_free:
	if (agf_bp != sc->sa.agf_bp)
		xfs_trans_brelse(sc->tp, agf_bp);
	return error;
}

/* Dispose of every block of every extent in the bitmap. */
int
xrep_reap_extents(
	struct xfs_scrub		*sc,
	struct xbitmap			*bitmap,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		type)
{
	struct xbitmap_range		*bmr;
	struct xbitmap_range		*n;
	xfs_fsblock_t			fsbno;
	int				error = 0;

	ASSERT(xfs_has_rmapbt(sc->mp));

	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
		ASSERT(sc->ip != NULL ||
		       XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno);
		trace_xrep_dispose_btree_extent(sc->mp,
				XFS_FSB_TO_AGNO(sc->mp, fsbno),
				XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);

		error = xrep_reap_block(sc, fsbno, oinfo, type);
		if (error)
			break;
	}

	return error;
}

/*
 * Finding per-AG Btree Roots for AGF/AGI Reconstruction
 *
 * If the AGF or AGI become slightly corrupted, it may be necessary to rebuild
 * the AG headers by using the rmap data to rummage through the AG looking for
 * btree roots.  This is not guaranteed to work if the AG is heavily damaged
 * or the rmap data are corrupt.
 *
 * Callers of xrep_find_ag_btree_roots must lock the AGF and AGFL
 * buffers if the AGF is being rebuilt; or the AGF and AGI buffers if the
 * AGI is being rebuilt.  It must maintain these locks until it's safe for
 * other threads to change the btrees' shapes.  The caller provides
 * information about the btrees to look for by passing in an array of
 * xrep_find_ag_btree with the (rmap owner, buf_ops) fields set.
 * The (root, height) fields will be set on return if anything is found.  The
 * last element of the array should have a NULL buf_ops to mark the end of the
 * array.
 *
 * For every rmapbt record matching any of the rmap owners in btree_info,
 * read each block referenced by the rmap record.  If the block is a btree
 * block from this filesystem matching any of the magic numbers and has a
 * level higher than what we've already seen, remember the block and the
 * height of the tree required to have such a block.  When the call completes,
 * we return the highest block we've found for each btree description; those
 * should be the roots.
 */

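/*
 * Illustrative sketch (not part of the kernel): an AGF rebuild might describe
 * the free space btrees roughly like this before calling
 * xrep_find_ag_btree_roots().  This array is an example only; the real
 * tables live in scrub/agheader_repair.c:
 *
 *	struct xrep_find_ag_btree fab[] = {
 *		{ .rmap_owner = XFS_RMAP_OWN_AG, .buf_ops = &xfs_bnobt_buf_ops },
 *		{ .rmap_owner = XFS_RMAP_OWN_AG, .buf_ops = &xfs_cntbt_buf_ops },
 *		{ .buf_ops = NULL },
 *	};
 *
 *	error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
 *
 * On return, each fab[i].root is either the best candidate root block for
 * that btree or NULLAGBLOCK, and fab[i].height is the inferred tree height.
 */
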
struct xrep_findroot {
	struct xfs_scrub		*sc;
	struct xfs_buf			*agfl_bp;
	struct xfs_agf			*agf;
	struct xrep_find_ag_btree	*btree_info;
};

/* See if our block is in the AGFL. */
STATIC int
xrep_findroot_agfl_walk(
	struct xfs_mount	*mp,
	xfs_agblock_t		bno,
	void			*priv)
{
	xfs_agblock_t		*agbno = priv;

	return (*agbno == bno) ? -ECANCELED : 0;
}

/* Does this block match the btree information passed in? */
STATIC int
xrep_findroot_block(
	struct xrep_findroot		*ri,
	struct xrep_find_ag_btree	*fab,
	uint64_t			owner,
	xfs_agblock_t			agbno,
	bool				*done_with_block)
{
	struct xfs_mount		*mp = ri->sc->mp;
	struct xfs_buf			*bp;
	struct xfs_btree_block		*btblock;
	xfs_daddr_t			daddr;
	int				block_level;
	int				error = 0;

	daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.pag->pag_agno, agbno);

	/*
	 * Blocks in the AGFL have stale contents that might just happen to
	 * have a matching magic and uuid.  We don't want to pull these blocks
	 * in as part of a tree root, so we have to filter out the AGFL stuff
	 * here.  If the AGFL looks insane we'll just refuse to repair.
	 */
	if (owner == XFS_RMAP_OWN_AG) {
		error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
				xrep_findroot_agfl_walk, &agbno);
		if (error == -ECANCELED)
			return 0;
		if (error)
			return error;
	}

	/*
	 * Read the buffer into memory so that we can see if it's a match for
	 * our btree type.  We have no clue if it is beforehand, and we want to
	 * avoid xfs_trans_read_buf's behavior of dumping the DONE state (which
	 * will cause needless disk reads in subsequent calls to this function)
	 * and logging metadata verifier failures.
	 *
	 * Therefore, pass in NULL buffer ops.  If the buffer was already in
	 * memory from some other caller it will already have b_ops assigned.
	 * If it was in memory from a previous unsuccessful findroot_block
	 * call, the buffer won't have b_ops but it should be clean and ready
	 * for us to try to verify if the read call succeeds.  The same applies
	 * if the buffer wasn't in memory at all.
	 *
	 * Note: If we never match a btree type with this buffer, it will be
	 * left in memory with NULL b_ops.  This shouldn't be a problem unless
	 * the buffer gets written.
	 */
	error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr,
			mp->m_bsize, 0, &bp, NULL);
	if (error)
		return error;

	/* Ensure the block magic matches the btree type we're looking for. */
	btblock = XFS_BUF_TO_BLOCK(bp);
	ASSERT(fab->buf_ops->magic[1] != 0);
	if (btblock->bb_magic != fab->buf_ops->magic[1])
		goto out;

	/*
	 * If the buffer already has ops applied and they're not the ones for
	 * this btree type, we know this block doesn't match the btree and we
	 * can bail out.
	 *
	 * If the buffer ops match ours, someone else has already validated
	 * the block for us, so we can move on to checking if this is a root
	 * block candidate.
	 *
	 * If the buffer does not have ops, nobody has successfully validated
	 * the contents and the buffer cannot be dirty.  If the magic, uuid,
	 * and structure match this btree type then we'll move on to checking
	 * if it's a root block candidate.  If there is no match, bail out.
	 */
	if (bp->b_ops) {
		if (bp->b_ops != fab->buf_ops)
			goto out;
	} else {
		ASSERT(!xfs_trans_buf_is_dirty(bp));
		if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
				&mp->m_sb.sb_meta_uuid))
			goto out;
		/*
		 * Read verifiers can reference b_ops, so we set the pointer
		 * here.  If the verifier fails we'll reset the buffer state
		 * to what it was before we touched the buffer.
		 */
		bp->b_ops = fab->buf_ops;
		fab->buf_ops->verify_read(bp);
		if (bp->b_error) {
			bp->b_ops = NULL;
			bp->b_error = 0;
			goto out;
		}

		/*
		 * Some read verifiers will (re)set b_ops, so we must be
		 * careful not to change b_ops after running the verifier.
		 */
	}

	/*
	 * This block passes the magic/uuid and verifier tests for this btree
	 * type.  We don't need the caller to try the other tree types.
	 */
	*done_with_block = true;

	/*
	 * Compare this btree block's level to the height of the current
	 * candidate root block.
	 *
	 * If the level matches the root we found previously, throw away both
	 * blocks because there can't be two candidate roots.
	 *
	 * If level is lower in the tree than the root we found previously,
	 * ignore this block.
	 */
	block_level = xfs_btree_get_level(btblock);
	if (block_level + 1 == fab->height) {
		fab->root = NULLAGBLOCK;
		goto out;
	} else if (block_level < fab->height) {
		goto out;
	}

	/*
	 * This is the highest block in the tree that we've found so far.
	 * Update the btree height to reflect what we've learned from this
	 * block.
	 */
	fab->height = block_level + 1;

	/*
	 * If this block doesn't have sibling pointers, then it's the new root
	 * block candidate.  Otherwise, the root will be found farther up the
	 * tree.
	 */
	if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) &&
	    btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
		fab->root = agbno;
	else
		fab->root = NULLAGBLOCK;

	trace_xrep_findroot_block(mp, ri->sc->sa.pag->pag_agno, agbno,
			be32_to_cpu(btblock->bb_magic), fab->height - 1);
out:
	xfs_trans_brelse(ri->sc->tp, bp);
	return error;
}

/*
 * Do any of the blocks in this rmap record match one of the btrees we're
 * looking for?
 */
STATIC int
xrep_findroot_rmap(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xrep_findroot		*ri = priv;
	struct xrep_find_ag_btree	*fab;
	xfs_agblock_t			b;
	bool				done;
	int				error = 0;

	/* Ignore anything that isn't AG metadata. */
	if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner))
		return 0;

	/* Otherwise scan each block + btree type. */
	for (b = 0; b < rec->rm_blockcount; b++) {
		done = false;
		for (fab = ri->btree_info; fab->buf_ops; fab++) {
			if (rec->rm_owner != fab->rmap_owner)
				continue;
			error = xrep_findroot_block(ri, fab,
					rec->rm_owner, rec->rm_startblock + b,
					&done);
			if (error)
				return error;
			if (done)
				break;
		}
	}

	return 0;
}

/* Find the roots of the per-AG btrees described in btree_info. */
int
xrep_find_ag_btree_roots(
	struct xfs_scrub		*sc,
	struct xfs_buf			*agf_bp,
	struct xrep_find_ag_btree	*btree_info,
	struct xfs_buf			*agfl_bp)
{
	struct xfs_mount		*mp = sc->mp;
	struct xrep_findroot		ri;
	struct xrep_find_ag_btree	*fab;
	struct xfs_btree_cur		*cur;
	int				error;

	ASSERT(xfs_buf_islocked(agf_bp));
	ASSERT(agfl_bp == NULL || xfs_buf_islocked(agfl_bp));

	ri.sc = sc;
	ri.btree_info = btree_info;
	ri.agf = agf_bp->b_addr;
	ri.agfl_bp = agfl_bp;
	for (fab = btree_info; fab->buf_ops; fab++) {
		ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG);
		ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner));
		fab->root = NULLAGBLOCK;
		fab->height = 0;
	}

	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
	error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
	xfs_btree_del_cursor(cur, error);

	return error;
}

/* Force a quotacheck the next time we mount. */
void
xrep_force_quotacheck(
	struct xfs_scrub	*sc,
	xfs_dqtype_t		type)
{
	uint			flag;

	flag = xfs_quota_chkd_flag(type);
	if (!(flag & sc->mp->m_qflags))
		return;

	mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
	sc->mp->m_qflags &= ~flag;
	spin_lock(&sc->mp->m_sb_lock);
	sc->mp->m_sb.sb_qflags &= ~flag;
	spin_unlock(&sc->mp->m_sb_lock);
	xfs_log_sb(sc->tp);
	mutex_unlock(&sc->mp->m_quotainfo->qi_quotaofflock);
}

/*
 * Attach dquots to this inode, or schedule quotacheck to fix them.
 *
 * This function ensures that the appropriate dquots are attached to an inode.
 * We cannot allow the dquot code to allocate an on-disk dquot block here
 * because we're already in transaction context with the inode locked.  The
 * on-disk dquot should already exist anyway.  If the quota code signals
 * corruption or missing quota information, schedule quotacheck, which will
 * repair corruptions in the quota metadata.
 */
int
xrep_ino_dqattach(
	struct xfs_scrub	*sc)
{
	int			error;

	error = xfs_qm_dqattach_locked(sc->ip, false);
	switch (error) {
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -ENOENT:
		xfs_err_ratelimited(sc->mp,
"inode %llu repair encountered quota error %d, quotacheck forced.",
				(unsigned long long)sc->ip->i_ino, error);
		if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_USER);
		if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_GROUP);
		if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_PROJ);
		fallthrough;
	case -ESRCH:
		error = 0;
		break;
	default:
		break;
	}

	return error;
}
984