// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_extent_busy.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"

/*
 * Attempt to repair some metadata, if the metadata is corrupt and userspace
 * told us to fix it.  This function returns -EAGAIN to mean "re-run scrub",
 * and will set the XREP_ALREADY_FIXED flag if it thinks it repaired anything.
 */
int
xrep_attempt(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error);

	xchk_ag_btcur_free(&sc->sa);

	/* Repair whatever's broken. */
	ASSERT(sc->ops->repair);
	error = sc->ops->repair(sc);
	trace_xrep_done(XFS_I(file_inode(sc->file)), sc->sm, error);
	switch (error) {
	case 0:
		/*
		 * Repair succeeded.  Commit the fixes and perform a second
		 * scrub so that we can tell userspace if we fixed the problem.
		 */
		sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
		sc->flags |= XREP_ALREADY_FIXED;
		return -EAGAIN;
	case -ECHRNG:
		sc->flags |= XCHK_NEED_DRAIN;
		return -EAGAIN;
	case -EDEADLOCK:
		/* Tell the caller to try again having grabbed all the locks. */
		if (!(sc->flags & XCHK_TRY_HARDER)) {
			sc->flags |= XCHK_TRY_HARDER;
			return -EAGAIN;
		}
		/*
		 * We tried harder but still couldn't grab all the resources
		 * we needed to fix it.  The corruption has not been fixed,
		 * so exit to userspace with the scan's output flags unchanged.
		 */
		return 0;
	default:
		/*
		 * EAGAIN tells the caller to re-scrub, so we cannot return
		 * that here.
		 */
		ASSERT(error != -EAGAIN);
		return error;
	}
}

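/*
 * Illustrative sketch only: the dispatcher in xfs_scrub_metadata() is the
 * real caller of xrep_attempt().  The loop below is a simplified,
 * hypothetical rendering of that retry logic (it omits the teardown and
 * re-setup between passes), showing how -EAGAIN means "run the scrubber
 * again" after a repair or a flag change.
 */
static inline int
xrep_example_scrub_and_repair(
	struct xfs_scrub	*sc)
{
	int			error;

	for (;;) {
		/* Check the metadata; the scrubber sets the output flags. */
		error = sc->ops->scrub(sc);
		if (error)
			return error;

		/* Nothing wrong, or we already fixed it?  We're done. */
		if (!(sc->sm->sm_flags & XFS_SCRUB_FLAGS_OUT) ||
		    (sc->flags & XREP_ALREADY_FIXED))
			return 0;

		/* Try to fix it; -EAGAIN means "scrub again". */
		error = xrep_attempt(sc);
		if (error != -EAGAIN)
			return error;
	}
}
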
/*
 * Complain about unfixable problems in the filesystem.  We don't log
 * corruptions when IFLAG_REPAIR wasn't set on the assumption that the driver
 * program is xfs_scrub, which will call back with IFLAG_REPAIR set if the
 * administrator isn't running xfs_scrub in no-repairs mode.
 *
 * Use this helper function because _ratelimited silently declares a static
 * structure to track rate limiting information.
 */
void
xrep_failure(
	struct xfs_mount	*mp)
{
	xfs_alert_ratelimited(mp,
"Corruption not fixed during online repair.  Unmount and run xfs_repair.");
}

/*
 * Repair probe -- userspace uses this to probe if we're willing to repair a
 * given mountpoint.
 */
int
xrep_probe(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	return 0;
}

/*
 * Roll a transaction, keeping the AG headers locked across the roll.
 */
int
xrep_roll_ag_trans(
	struct xfs_scrub	*sc)
{
	int			error;

	/*
	 * Keep the AG header buffers locked while we roll the transaction.
	 * Ensure that both AG buffers are dirty and held when we roll the
	 * transaction so that they move forward in the log without losing the
	 * bli (and hence the bli type) when the transaction commits.
	 *
	 * Normal code would never hold clean buffers across a roll, but repair
	 * needs both buffers to maintain a total lock on the AG.
	 */
	if (sc->sa.agi_bp) {
		xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM);
		xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
	}

	if (sc->sa.agf_bp) {
		xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM);
		xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
	}

	/*
	 * Roll the transaction.  We still hold the AG header buffers locked
	 * regardless of whether or not that succeeds.  On failure, the buffers
	 * will be released during teardown on our way out of the kernel.  If
	 * successful, join the buffers to the new transaction and move on.
	 */
	error = xfs_trans_roll(&sc->tp);
	if (error)
		return error;

	/* Join the AG headers to the new transaction. */
	if (sc->sa.agi_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
	if (sc->sa.agf_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);

	return 0;
}

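/*
 * Illustrative sketch only: a long-running rebuild that dirties the AG
 * headers would call xrep_roll_ag_trans() periodically so that it never
 * overflows the transaction reservation.  Both helpers marked
 * "hypothetical" below are stand-ins, not real functions.
 */
static inline int
xrep_example_rebuild_loop(
	struct xfs_scrub	*sc)
{
	int			error;

	while (example_have_more_records(sc)) {		/* hypothetical */
		error = example_insert_one_record(sc);	/* hypothetical */
		if (error)
			return error;

		/* The AG headers stay locked across the roll. */
		error = xrep_roll_ag_trans(sc);
		if (error)
			return error;
	}
	return 0;
}
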
/*
 * Does the given AG have enough space to rebuild a btree?  Neither AG
 * reservation can be critical, and we must have enough space (factoring
 * in AG reservations) to construct a whole btree.
 */
bool
xrep_ag_has_space(
	struct xfs_perag	*pag,
	xfs_extlen_t		nr_blocks,
	enum xfs_ag_resv_type	type)
{
	return  !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&
		!xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&
		pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks;
}

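/*
 * Illustrative sketch only: a rebuild would bail out early if the AG
 * can't absorb the new btree.  The block estimate and the choice of
 * reservation type here are hypothetical placeholders.
 */
static inline int
xrep_example_check_space(
	struct xfs_scrub	*sc,
	xfs_extlen_t		est_blocks)
{
	if (!xrep_ag_has_space(sc->sa.pag, est_blocks, XFS_AG_RESV_NONE))
		return -ENOSPC;
	return 0;
}
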
/*
 * Figure out how many blocks to reserve for an AG repair.  We calculate the
 * worst case estimate for the number of blocks we'd need to rebuild one of
 * any type of per-AG btree.
 */
xfs_extlen_t
xrep_calc_ag_resblks(
	struct xfs_scrub		*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_scrub_metadata	*sm = sc->sm;
	struct xfs_perag		*pag;
	struct xfs_buf			*bp;
	xfs_agino_t			icount = NULLAGINO;
	xfs_extlen_t			aglen = NULLAGBLOCK;
	xfs_extlen_t			usedlen;
	xfs_extlen_t			freelen;
	xfs_extlen_t			bnobt_sz;
	xfs_extlen_t			inobt_sz;
	xfs_extlen_t			rmapbt_sz;
	xfs_extlen_t			refcbt_sz;
	int				error;

	if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
		return 0;

	pag = xfs_perag_get(mp, sm->sm_agno);
	if (xfs_perag_initialised_agi(pag)) {
		/* Use in-core icount if possible. */
		icount = pag->pagi_count;
	} else {
		/* Try to get the actual counters from disk. */
		error = xfs_ialloc_read_agi(pag, NULL, &bp);
		if (!error) {
			icount = pag->pagi_count;
			xfs_buf_relse(bp);
		}
	}

	/* Now grab the block counters from the AGF. */
	error = xfs_alloc_read_agf(pag, NULL, 0, &bp);
	if (error) {
		aglen = pag->block_count;
		freelen = aglen;
		usedlen = aglen;
	} else {
		struct xfs_agf	*agf = bp->b_addr;

		aglen = be32_to_cpu(agf->agf_length);
		freelen = be32_to_cpu(agf->agf_freeblks);
		usedlen = aglen - freelen;
		xfs_buf_relse(bp);
	}

	/* If the icount is impossible, make some worst-case assumptions. */
	if (icount == NULLAGINO ||
	    !xfs_verify_agino(pag, icount)) {
		icount = pag->agino_max - pag->agino_min + 1;
	}

	/* If the block counts are impossible, make worst-case assumptions. */
	if (aglen == NULLAGBLOCK ||
	    aglen != pag->block_count ||
	    freelen >= aglen) {
		aglen = pag->block_count;
		freelen = aglen;
		usedlen = aglen;
	}
	xfs_perag_put(pag);

	trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
			freelen, usedlen);

	/*
	 * Figure out how many blocks we'd need worst case to rebuild
	 * each type of btree.  Note that we can only rebuild the
	 * bnobt/cntbt or inobt/finobt as pairs.
	 */
	bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
	if (xfs_has_sparseinodes(mp))
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_HOLEMASK_BIT);
	else
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_CHUNK);
	if (xfs_has_finobt(mp))
		inobt_sz *= 2;
	if (xfs_has_reflink(mp))
		refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
	else
		refcbt_sz = 0;
	if (xfs_has_rmapbt(mp)) {
		/*
		 * Guess how many blocks we need to rebuild the rmapbt.
		 * For non-reflink filesystems we can't have more records than
		 * used blocks.  However, with reflink it's possible to have
		 * more than one rmap record per AG block.  We don't know how
		 * many rmaps there could be in the AG, so we start off with
		 * what we hope is a generous over-estimate.
		 */
		if (xfs_has_reflink(mp))
			rmapbt_sz = xfs_rmapbt_calc_size(mp,
					(unsigned long long)aglen * 2);
		else
			rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
	} else {
		rmapbt_sz = 0;
	}

	trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
			inobt_sz, rmapbt_sz, refcbt_sz);

	return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}

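/*
 * Illustrative sketch only: the scrub setup code feeds the estimate above
 * into the repair transaction's block reservation.  xchk_trans_alloc() is
 * the real helper in scrub/common.c, but treat this pairing as an
 * assumption about the call site rather than a quote of it.
 */
static inline int
xrep_example_setup(
	struct xfs_scrub	*sc)
{
	xfs_extlen_t		resblks;

	/* Worst-case blocks needed to rebuild any per-AG btree. */
	resblks = xrep_calc_ag_resblks(sc);
	return xchk_trans_alloc(sc, resblks);
}
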
/*
 * Reconstructing per-AG Btrees
 *
 * When a space btree is corrupt, we don't bother trying to fix it.  Instead,
 * we scan secondary space metadata to derive the records that should be in
 * the damaged btree, initialize a fresh btree root, and insert the records.
 * Note that for rebuilding the rmapbt we scan all the primary data to
 * generate the new records.
 *
 * However, that leaves the matter of removing all the metadata describing the
 * old broken structure.  For primary metadata we use the rmap data to collect
 * every extent with a matching rmap owner (bitmap); we then iterate all other
 * metadata structures with the same rmap owner to collect the extents that
 * cannot be removed (sublist).  We then subtract sublist from bitmap to
 * derive the blocks that were used by the old btree.  These blocks can be
 * reaped.
 *
 * For rmapbt reconstructions we must use different tactics for extent
 * collection.  First we iterate all primary metadata (this excludes the old
 * rmapbt, obviously) to generate new rmap records.  The gaps in the rmap
 * records are collected as bitmap.  The bnobt records are collected as
 * sublist.  As with the other btrees we subtract sublist from bitmap, and the
 * result (since the rmapbt lives in the free space) is the set of blocks
 * from the old rmapbt.
 */

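/*
 * Illustrative sketch only of the subtraction described above, assuming
 * the xbitmap helpers declared in scrub/bitmap.h.  Both collection
 * helpers are hypothetical stand-ins for the real per-btree scanners.
 */
static inline int
xrep_example_find_old_btree_blocks(
	struct xfs_scrub	*sc)
{
	struct xbitmap		old_blocks;	/* all extents with our rmap owner */
	struct xbitmap		keep_blocks;	/* extents we may not remove */
	int			error;

	xbitmap_init(&old_blocks);
	xbitmap_init(&keep_blocks);

	error = example_collect_owner_extents(sc, &old_blocks);	/* hypothetical */
	if (error)
		goto out;
	error = example_collect_live_extents(sc, &keep_blocks);	/* hypothetical */
	if (error)
		goto out;

	/* bitmap - sublist = blocks used only by the old broken btree. */
	error = xbitmap_disunion(&old_blocks, &keep_blocks);
	if (error)
		goto out;

	/* old_blocks now holds the extents that can be reaped. */
out:
	xbitmap_destroy(&keep_blocks);
	xbitmap_destroy(&old_blocks);
	return error;
}
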
/* Ensure the freelist is the correct size. */
int
xrep_fix_freelist(
	struct xfs_scrub	*sc,
	bool			can_shrink)
{
	struct xfs_alloc_arg	args = {0};

	args.mp = sc->mp;
	args.tp = sc->tp;
	args.agno = sc->sa.pag->pag_agno;
	args.alignment = 1;
	args.pag = sc->sa.pag;

	return xfs_alloc_fix_freelist(&args,
			can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK);
}

/*
 * Finding per-AG Btree Roots for AGF/AGI Reconstruction
 *
 * If the AGF or AGI becomes slightly corrupted, it may be necessary to
 * rebuild the AG headers by using the rmap data to rummage through the AG
 * looking for btree roots.  This is not guaranteed to work if the AG is
 * heavily damaged or the rmap data are corrupt.
 *
 * Callers of xrep_find_ag_btree_roots must lock the AGF and AGFL
 * buffers if the AGF is being rebuilt; or the AGF and AGI buffers if the
 * AGI is being rebuilt.  They must maintain these locks until it's safe for
 * other threads to change the btrees' shapes.  The caller provides
 * information about the btrees to look for by passing in an array of
 * xrep_find_ag_btree with the rmap owner and buf_ops fields set; the
 * expected magic numbers are taken from buf_ops.  The (root, height) fields
 * will be set on return if anything is found.  The last element of the
 * array should have a NULL buf_ops to mark the end of the array.
 *
 * For every rmapbt record matching any of the rmap owners in btree_info,
 * read each block referenced by the rmap record.  If the block is a btree
 * block from this filesystem matching any of the magic numbers and has a
 * level higher than what we've already seen, remember the block and the
 * height of the tree required to have such a block.  When the call completes,
 * we return the highest block we've found for each btree description; those
 * should be the roots.
 */

struct xrep_findroot {
	struct xfs_scrub		*sc;
	struct xfs_buf			*agfl_bp;
	struct xfs_agf			*agf;
	struct xrep_find_ag_btree	*btree_info;
};

/* See if our block is in the AGFL. */
STATIC int
xrep_findroot_agfl_walk(
	struct xfs_mount	*mp,
	xfs_agblock_t		bno,
	void			*priv)
{
	xfs_agblock_t		*agbno = priv;

	return (*agbno == bno) ? -ECANCELED : 0;
}

/* Does this block match the btree information passed in? */
STATIC int
xrep_findroot_block(
	struct xrep_findroot		*ri,
	struct xrep_find_ag_btree	*fab,
	uint64_t			owner,
	xfs_agblock_t			agbno,
	bool				*done_with_block)
{
	struct xfs_mount		*mp = ri->sc->mp;
	struct xfs_buf			*bp;
	struct xfs_btree_block		*btblock;
	xfs_daddr_t			daddr;
	int				block_level;
	int				error = 0;

	daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.pag->pag_agno, agbno);

	/*
	 * Blocks in the AGFL have stale contents that might just happen to
	 * have a matching magic and uuid.  We don't want to pull these blocks
	 * in as part of a tree root, so we have to filter out the AGFL stuff
	 * here.  If the AGFL looks insane we'll just refuse to repair.
	 */
	if (owner == XFS_RMAP_OWN_AG) {
		error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
				xrep_findroot_agfl_walk, &agbno);
		if (error == -ECANCELED)
			return 0;
		if (error)
			return error;
	}

	/*
	 * Read the buffer into memory so that we can see if it's a match for
	 * our btree type.  We have no clue if it is beforehand, and we want to
	 * avoid xfs_trans_read_buf's behavior of dumping the DONE state (which
	 * will cause needless disk reads in subsequent calls to this function)
	 * and logging metadata verifier failures.
	 *
	 * Therefore, pass in NULL buffer ops.  If the buffer was already in
	 * memory from some other caller it will already have b_ops assigned.
	 * If it was in memory from a previous unsuccessful findroot_block
	 * call, the buffer won't have b_ops but it should be clean and ready
	 * for us to try to verify if the read call succeeds.  The same applies
	 * if the buffer wasn't in memory at all.
	 *
	 * Note: If we never match a btree type with this buffer, it will be
	 * left in memory with NULL b_ops.  This shouldn't be a problem unless
	 * the buffer gets written.
	 */
	error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr,
			mp->m_bsize, 0, &bp, NULL);
	if (error)
		return error;

	/* Ensure the block magic matches the btree type we're looking for. */
	btblock = XFS_BUF_TO_BLOCK(bp);
	ASSERT(fab->buf_ops->magic[1] != 0);
	if (btblock->bb_magic != fab->buf_ops->magic[1])
		goto out;

	/*
	 * If the buffer already has ops applied and they're not the ones for
	 * this btree type, we know this block doesn't match the btree and we
	 * can bail out.
	 *
	 * If the buffer ops match ours, someone else has already validated
	 * the block for us, so we can move on to checking if this is a root
	 * block candidate.
	 *
	 * If the buffer does not have ops, nobody has successfully validated
	 * the contents and the buffer cannot be dirty.  If the magic, uuid,
	 * and structure match this btree type then we'll move on to checking
	 * if it's a root block candidate.  If there is no match, bail out.
	 */
	if (bp->b_ops) {
		if (bp->b_ops != fab->buf_ops)
			goto out;
	} else {
		ASSERT(!xfs_trans_buf_is_dirty(bp));
		if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
				&mp->m_sb.sb_meta_uuid))
			goto out;
		/*
		 * Read verifiers can reference b_ops, so we set the pointer
		 * here.  If the verifier fails we'll reset the buffer state
		 * to what it was before we touched the buffer.
		 */
		bp->b_ops = fab->buf_ops;
		fab->buf_ops->verify_read(bp);
		if (bp->b_error) {
			bp->b_ops = NULL;
			bp->b_error = 0;
			goto out;
		}

		/*
		 * Some read verifiers will (re)set b_ops, so we must be
		 * careful not to change b_ops after running the verifier.
		 */
	}

	/*
	 * This block passes the magic/uuid and verifier tests for this btree
	 * type.  We don't need the caller to try the other tree types.
	 */
	*done_with_block = true;

	/*
	 * Compare this btree block's level to the height of the current
	 * candidate root block.
	 *
	 * If the level matches the root we found previously, throw away both
	 * blocks because there can't be two candidate roots.
	 *
	 * If level is lower in the tree than the root we found previously,
	 * ignore this block.
	 */
	block_level = xfs_btree_get_level(btblock);
	if (block_level + 1 == fab->height) {
		fab->root = NULLAGBLOCK;
		goto out;
	} else if (block_level < fab->height) {
		goto out;
	}

	/*
	 * This is the highest block in the tree that we've found so far.
	 * Update the btree height to reflect what we've learned from this
	 * block.
	 */
	fab->height = block_level + 1;

	/*
	 * If this block doesn't have sibling pointers, then it's the new root
	 * block candidate.  Otherwise, the root will be found farther up the
	 * tree.
	 */
	if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) &&
	    btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
		fab->root = agbno;
	else
		fab->root = NULLAGBLOCK;

	trace_xrep_findroot_block(mp, ri->sc->sa.pag->pag_agno, agbno,
			be32_to_cpu(btblock->bb_magic), fab->height - 1);
out:
	xfs_trans_brelse(ri->sc->tp, bp);
	return error;
}

543  * Do any of the blocks in this rmap record match one of the btrees we're
544  * looking for?
545  */
546 STATIC int
547 xrep_findroot_rmap(
548 	struct xfs_btree_cur		*cur,
549 	const struct xfs_rmap_irec	*rec,
550 	void				*priv)
551 {
552 	struct xrep_findroot		*ri = priv;
553 	struct xrep_find_ag_btree	*fab;
554 	xfs_agblock_t			b;
555 	bool				done;
556 	int				error = 0;
557 
558 	/* Ignore anything that isn't AG metadata. */
559 	if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner))
560 		return 0;
561 
562 	/* Otherwise scan each block + btree type. */
563 	for (b = 0; b < rec->rm_blockcount; b++) {
564 		done = false;
565 		for (fab = ri->btree_info; fab->buf_ops; fab++) {
566 			if (rec->rm_owner != fab->rmap_owner)
567 				continue;
568 			error = xrep_findroot_block(ri, fab,
569 					rec->rm_owner, rec->rm_startblock + b,
570 					&done);
571 			if (error)
572 				return error;
573 			if (done)
574 				break;
575 		}
576 	}
577 
578 	return 0;
579 }
580 
/* Find the roots of the per-AG btrees described in btree_info. */
int
xrep_find_ag_btree_roots(
	struct xfs_scrub		*sc,
	struct xfs_buf			*agf_bp,
	struct xrep_find_ag_btree	*btree_info,
	struct xfs_buf			*agfl_bp)
{
	struct xfs_mount		*mp = sc->mp;
	struct xrep_findroot		ri;
	struct xrep_find_ag_btree	*fab;
	struct xfs_btree_cur		*cur;
	int				error;

	ASSERT(xfs_buf_islocked(agf_bp));
	ASSERT(agfl_bp == NULL || xfs_buf_islocked(agfl_bp));

	ri.sc = sc;
	ri.btree_info = btree_info;
	ri.agf = agf_bp->b_addr;
	ri.agfl_bp = agfl_bp;
	for (fab = btree_info; fab->buf_ops; fab++) {
		ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG);
		ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner));
		fab->root = NULLAGBLOCK;
		fab->height = 0;
	}

	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
	error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
	xfs_btree_del_cursor(cur, error);

	return error;
}

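/*
 * Illustrative sketch only: how a caller (the AGF repair code, say) might
 * hunt for the bnobt and cntbt roots.  The buf_ops tables are the real
 * verifier structures, but the function itself and its error policy are
 * hypothetical.
 */
static inline int
xrep_example_find_allocbt_roots(
	struct xfs_scrub		*sc,
	struct xfs_buf			*agf_bp,
	struct xfs_buf			*agfl_bp)
{
	struct xrep_find_ag_btree	fab[] = {
		{
			.rmap_owner	= XFS_RMAP_OWN_AG,
			.buf_ops	= &xfs_bnobt_buf_ops,
		},
		{
			.rmap_owner	= XFS_RMAP_OWN_AG,
			.buf_ops	= &xfs_cntbt_buf_ops,
		},
		{
			/* NULL buf_ops terminates the array. */
			.buf_ops	= NULL,
		},
	};
	int				error;

	/* Caller must already hold the locked AGF and AGFL buffers. */
	error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
	if (error)
		return error;

	/* Each root is now a candidate block number, or NULLAGBLOCK. */
	if (fab[0].root == NULLAGBLOCK || fab[1].root == NULLAGBLOCK)
		return -EFSCORRUPTED;
	return 0;
}
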
/* Force a quotacheck the next time we mount. */
void
xrep_force_quotacheck(
	struct xfs_scrub	*sc,
	xfs_dqtype_t		type)
{
	uint			flag;

	flag = xfs_quota_chkd_flag(type);
	if (!(flag & sc->mp->m_qflags))
		return;

	mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
	sc->mp->m_qflags &= ~flag;
	spin_lock(&sc->mp->m_sb_lock);
	sc->mp->m_sb.sb_qflags &= ~flag;
	spin_unlock(&sc->mp->m_sb_lock);
	xfs_log_sb(sc->tp);
	mutex_unlock(&sc->mp->m_quotainfo->qi_quotaofflock);
}

/*
 * Attach dquots to this inode, or schedule quotacheck to fix them.
 *
 * This function ensures that the appropriate dquots are attached to an inode.
 * We cannot allow the dquot code to allocate an on-disk dquot block here
 * because we're already in transaction context with the inode locked.  The
 * on-disk dquot should already exist anyway.  If the quota code signals
 * corruption or missing quota information, schedule quotacheck, which will
 * repair corruptions in the quota metadata.
 */
int
xrep_ino_dqattach(
	struct xfs_scrub	*sc)
{
	int			error;

	error = xfs_qm_dqattach_locked(sc->ip, false);
	switch (error) {
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -ENOENT:
		xfs_err_ratelimited(sc->mp,
"inode %llu repair encountered quota error %d, quotacheck forced.",
				(unsigned long long)sc->ip->i_ino, error);
		if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_USER);
		if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_GROUP);
		if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_PROJ);
		fallthrough;
	case -ESRCH:
		error = 0;
		break;
	default:
		break;
	}

	return error;
}
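
/*
 * Illustrative sketch only: an inode repair function would attach dquots
 * right after the locked inode has been joined to its transaction, so that
 * block count changes made during the repair are charged to the right
 * quotas.  Everything here other than the xrep_ino_dqattach call is
 * hypothetical.
 */
static inline int
xrep_example_fix_inode(
	struct xfs_scrub	*sc)
{
	int			error;

	/* sc->ip is assumed locked and joined to sc->tp by the caller. */
	error = xrep_ino_dqattach(sc);
	if (error)
		return error;

	return example_repair_inode_metadata(sc);	/* hypothetical */
}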