xref: /openbmc/linux/fs/xfs/scrub/refcount.c (revision db0502b39c21d1cab6b6778a416a5b407170be90)
1739a2fe0SDarrick J. Wong // SPDX-License-Identifier: GPL-2.0-or-later
2edc09b52SDarrick J. Wong /*
3ecc73f8aSDarrick J. Wong  * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
4739a2fe0SDarrick J. Wong  * Author: Darrick J. Wong <djwong@kernel.org>
5edc09b52SDarrick J. Wong  */
6edc09b52SDarrick J. Wong #include "xfs.h"
7edc09b52SDarrick J. Wong #include "xfs_fs.h"
8edc09b52SDarrick J. Wong #include "xfs_shared.h"
9edc09b52SDarrick J. Wong #include "xfs_format.h"
10d5c88131SDarrick J. Wong #include "xfs_trans_resv.h"
11d5c88131SDarrick J. Wong #include "xfs_mount.h"
12edc09b52SDarrick J. Wong #include "xfs_btree.h"
13edc09b52SDarrick J. Wong #include "xfs_rmap.h"
14f6d5fc21SDarrick J. Wong #include "xfs_refcount.h"
15edc09b52SDarrick J. Wong #include "scrub/scrub.h"
16edc09b52SDarrick J. Wong #include "scrub/common.h"
17edc09b52SDarrick J. Wong #include "scrub/btree.h"
1890148903SDarrick J. Wong #include "scrub/trace.h"
2150f02fe3SDave Chinner #include "xfs_ag.h"
22edc09b52SDarrick J. Wong 
23edc09b52SDarrick J. Wong /*
24edc09b52SDarrick J. Wong  * Set us up to scrub reference count btrees.
25edc09b52SDarrick J. Wong  */
26edc09b52SDarrick J. Wong int
27c517b3aaSDarrick J. Wong xchk_setup_ag_refcountbt(
28026f57ebSDarrick J. Wong 	struct xfs_scrub	*sc)
29edc09b52SDarrick J. Wong {
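	/*
	 * Enable the intent drain if needed so that cross-referencing can
	 * wait for chained deferred operations targeting this AG to finish
	 * rather than tripping over half-completed updates.
	 */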
30466c525dSDarrick J. Wong 	if (xchk_need_intent_drain(sc))
31466c525dSDarrick J. Wong 		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
32026f57ebSDarrick J. Wong 	return xchk_setup_ag_btree(sc, false);
33edc09b52SDarrick J. Wong }
34edc09b52SDarrick J. Wong 
35edc09b52SDarrick J. Wong /* Reference count btree scrubber. */
36edc09b52SDarrick J. Wong 
37dbde19daSDarrick J. Wong /*
38dbde19daSDarrick J. Wong  * Confirming Reference Counts via Reverse Mappings
39dbde19daSDarrick J. Wong  *
40dbde19daSDarrick J. Wong  * We want to count the reverse mappings overlapping a refcount record
41dbde19daSDarrick J. Wong  * (bno, len, refcount), allowing for the possibility that some of the
42dbde19daSDarrick J. Wong  * overlap may come from smaller adjoining reverse mappings, while some
43dbde19daSDarrick J. Wong  * comes from single extents which overlap the range entirely.  The
44dbde19daSDarrick J. Wong  * outer loop is as follows:
45dbde19daSDarrick J. Wong  *
46dbde19daSDarrick J. Wong  * 1. For all reverse mappings overlapping the refcount extent,
47dbde19daSDarrick J. Wong  *    a. If a given rmap completely covers the extent, mark it as seen.
48dbde19daSDarrick J. Wong  *    b. Otherwise, record the fragment (in agbno order) for later
49dbde19daSDarrick J. Wong  *       processing.
50dbde19daSDarrick J. Wong  *
51dbde19daSDarrick J. Wong  * Once we've seen all the rmaps, we know that for all blocks in the
52dbde19daSDarrick J. Wong  * refcount record we want to find $refcount owners and we've already
53dbde19daSDarrick J. Wong  * visited $seen extents that overlap all the blocks.  Therefore, we
54dbde19daSDarrick J. Wong  * need to find ($refcount - $seen) owners for every block in the
55dbde19daSDarrick J. Wong  * extent; call that quantity $target_nr.  Proceed as follows:
56dbde19daSDarrick J. Wong  *
57dbde19daSDarrick J. Wong  * 2. Pull the first $target_nr fragments from the list; all of them
58dbde19daSDarrick J. Wong  *    should start at or before the start of the extent.
59dbde19daSDarrick J. Wong  *    Call this subset of fragments the working set.
60dbde19daSDarrick J. Wong  * 3. Until there are no more unprocessed fragments,
61dbde19daSDarrick J. Wong  *    a. Find the shortest fragments in the set and remove them.
62dbde19daSDarrick J. Wong  *    b. Note the block number of the end of these fragments.
63dbde19daSDarrick J. Wong  *    c. Pull the same number of fragments from the list.  All of these
64dbde19daSDarrick J. Wong  *       fragments should start at the block number recorded in the
65dbde19daSDarrick J. Wong  *       previous step.
66dbde19daSDarrick J. Wong  *    d. Put those fragments in the set.
67dbde19daSDarrick J. Wong  * 4. Check that there are $target_nr fragments remaining in the list,
68dbde19daSDarrick J. Wong  *    and that they all end at or beyond the end of the refcount extent.
69dbde19daSDarrick J. Wong  *
70dbde19daSDarrick J. Wong  * If the refcount is correct, all the check conditions in the algorithm
71dbde19daSDarrick J. Wong  * should always hold true.  If not, the refcount is incorrect.
72dbde19daSDarrick J. Wong  */
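/*
 * A hypothetical example: consider a refcount record (bno 10, len 4,
 * refcount 3) and rmaps (10, 4), (8, 4), (10, 2), (12, 2), and (12, 4).
 * Step 1 marks (10, 4) as seen and queues the other four as fragments,
 * so $target_nr = 3 - 1 = 2.  Step 2 pulls (8, 4) and (10, 2) into the
 * working set; both end at agbno 12.  Step 3 discards them, pulls the
 * two fragments starting at agbno 12, and notes agbno 14 as the next
 * boundary.  The fragment list is now empty and the working set reaches
 * agbno 14, the end of the record, so the refcount of 3 is confirmed.
 */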
73c517b3aaSDarrick J. Wong struct xchk_refcnt_frag {
74dbde19daSDarrick J. Wong 	struct list_head	list;
75dbde19daSDarrick J. Wong 	struct xfs_rmap_irec	rm;
76dbde19daSDarrick J. Wong };
77dbde19daSDarrick J. Wong 
78c517b3aaSDarrick J. Wong struct xchk_refcnt_check {
791d8a748aSDarrick J. Wong 	struct xfs_scrub	*sc;
80dbde19daSDarrick J. Wong 	struct list_head	fragments;
81dbde19daSDarrick J. Wong 
82dbde19daSDarrick J. Wong 	/* refcount extent we're examining */
83dbde19daSDarrick J. Wong 	xfs_agblock_t		bno;
84dbde19daSDarrick J. Wong 	xfs_extlen_t		len;
85dbde19daSDarrick J. Wong 	xfs_nlink_t		refcount;
86dbde19daSDarrick J. Wong 
87dbde19daSDarrick J. Wong 	/* number of owners seen */
88dbde19daSDarrick J. Wong 	xfs_nlink_t		seen;
89dbde19daSDarrick J. Wong };
90dbde19daSDarrick J. Wong 
91dbde19daSDarrick J. Wong /*
92dbde19daSDarrick J. Wong  * Decide if the given rmap is large enough that we can redeem it
93dbde19daSDarrick J. Wong  * towards refcount verification now, or if it's a fragment, in
94dbde19daSDarrick J. Wong  * which case we'll hang onto it in the hopes that we'll later
95dbde19daSDarrick J. Wong  * discover that we've collected exactly the correct number of
96dbde19daSDarrick J. Wong  * fragments as the refcountbt says we should have.
97dbde19daSDarrick J. Wong  */
98dbde19daSDarrick J. Wong STATIC int
99c517b3aaSDarrick J. Wong xchk_refcountbt_rmap_check(
100dbde19daSDarrick J. Wong 	struct xfs_btree_cur		*cur,
101159eb69dSDarrick J. Wong 	const struct xfs_rmap_irec	*rec,
102dbde19daSDarrick J. Wong 	void				*priv)
103dbde19daSDarrick J. Wong {
104c517b3aaSDarrick J. Wong 	struct xchk_refcnt_check	*refchk = priv;
105c517b3aaSDarrick J. Wong 	struct xchk_refcnt_frag		*frag;
106dbde19daSDarrick J. Wong 	xfs_agblock_t			rm_last;
107dbde19daSDarrick J. Wong 	xfs_agblock_t			rc_last;
108dbde19daSDarrick J. Wong 	int				error = 0;
109dbde19daSDarrick J. Wong 
110c517b3aaSDarrick J. Wong 	if (xchk_should_terminate(refchk->sc, &error))
111dbde19daSDarrick J. Wong 		return error;
112dbde19daSDarrick J. Wong 
113dbde19daSDarrick J. Wong 	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
114dbde19daSDarrick J. Wong 	rc_last = refchk->bno + refchk->len - 1;
115dbde19daSDarrick J. Wong 
116dbde19daSDarrick J. Wong 	/* Confirm that a single-owner refc extent is a CoW stage. */
117dbde19daSDarrick J. Wong 	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
118c517b3aaSDarrick J. Wong 		xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
119dbde19daSDarrick J. Wong 		return 0;
120dbde19daSDarrick J. Wong 	}
121dbde19daSDarrick J. Wong 
122dbde19daSDarrick J. Wong 	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
123dbde19daSDarrick J. Wong 		/*
124dbde19daSDarrick J. Wong 		 * The rmap overlaps the refcount record, so we can confirm
125dbde19daSDarrick J. Wong 		 * one refcount owner seen.
126dbde19daSDarrick J. Wong 		 */
127dbde19daSDarrick J. Wong 		refchk->seen++;
128dbde19daSDarrick J. Wong 	} else {
129dbde19daSDarrick J. Wong 		/*
130dbde19daSDarrick J. Wong 		 * This rmap covers only part of the refcount record, so
131dbde19daSDarrick J. Wong 		 * save the fragment for later processing.  If the rmapbt
132dbde19daSDarrick J. Wong 		 * is healthy each rmap_irec we see will be in agbno order
133dbde19daSDarrick J. Wong 		 * so we don't need insertion sort here.
134dbde19daSDarrick J. Wong 		 */
135306195f3SDarrick J. Wong 		frag = kmalloc(sizeof(struct xchk_refcnt_frag),
136306195f3SDarrick J. Wong 				XCHK_GFP_FLAGS);
137dbde19daSDarrick J. Wong 		if (!frag)
138dbde19daSDarrick J. Wong 			return -ENOMEM;
139dbde19daSDarrick J. Wong 		memcpy(&frag->rm, rec, sizeof(frag->rm));
140dbde19daSDarrick J. Wong 		list_add_tail(&frag->list, &refchk->fragments);
141dbde19daSDarrick J. Wong 	}
142dbde19daSDarrick J. Wong 
143dbde19daSDarrick J. Wong 	return 0;
144dbde19daSDarrick J. Wong }
145dbde19daSDarrick J. Wong 
146dbde19daSDarrick J. Wong /*
147dbde19daSDarrick J. Wong  * Given a bunch of rmap fragments, iterate through them, keeping
148dbde19daSDarrick J. Wong  * a running tally of the refcount.  If this ever deviates from
149dbde19daSDarrick J. Wong  * what we expect (which is the refcountbt's refcount minus the
150dbde19daSDarrick J. Wong  * number of extents that totally covered the refcountbt extent),
151dbde19daSDarrick J. Wong  * we have a refcountbt error.
152dbde19daSDarrick J. Wong  */
153dbde19daSDarrick J. Wong STATIC void
154c517b3aaSDarrick J. Wong xchk_refcountbt_process_rmap_fragments(
155c517b3aaSDarrick J. Wong 	struct xchk_refcnt_check	*refchk)
156dbde19daSDarrick J. Wong {
157dbde19daSDarrick J. Wong 	struct list_head		worklist;
158c517b3aaSDarrick J. Wong 	struct xchk_refcnt_frag		*frag;
159c517b3aaSDarrick J. Wong 	struct xchk_refcnt_frag		*n;
160dbde19daSDarrick J. Wong 	xfs_agblock_t			bno;
161dbde19daSDarrick J. Wong 	xfs_agblock_t			rbno;
162dbde19daSDarrick J. Wong 	xfs_agblock_t			next_rbno;
163dbde19daSDarrick J. Wong 	xfs_nlink_t			nr;
164dbde19daSDarrick J. Wong 	xfs_nlink_t			target_nr;
165dbde19daSDarrick J. Wong 
166dbde19daSDarrick J. Wong 	target_nr = refchk->refcount - refchk->seen;
167dbde19daSDarrick J. Wong 	if (target_nr == 0)
168dbde19daSDarrick J. Wong 		return;
169dbde19daSDarrick J. Wong 
170dbde19daSDarrick J. Wong 	/*
171dbde19daSDarrick J. Wong 	 * There are (refchk->refcount - refchk->seen) references we
172dbde19daSDarrick J. Wong 	 * haven't found yet.  Pull that many off the
173dbde19daSDarrick J. Wong 	 * fragment list and figure out where the smallest rmap ends
174dbde19daSDarrick J. Wong 	 * (and therefore the next rmap should start).  All the rmaps
175dbde19daSDarrick J. Wong 	 * we pull off should start at or before the beginning of the
176dbde19daSDarrick J. Wong 	 * refcount record's range.
177dbde19daSDarrick J. Wong 	 */
178dbde19daSDarrick J. Wong 	INIT_LIST_HEAD(&worklist);
179dbde19daSDarrick J. Wong 	rbno = NULLAGBLOCK;
180dbde19daSDarrick J. Wong 
181dbde19daSDarrick J. Wong 	/* Make sure the fragments actually /are/ in agbno order. */
182dbde19daSDarrick J. Wong 	bno = 0;
183dbde19daSDarrick J. Wong 	list_for_each_entry(frag, &refchk->fragments, list) {
184dbde19daSDarrick J. Wong 		if (frag->rm.rm_startblock < bno)
185dbde19daSDarrick J. Wong 			goto done;
186dbde19daSDarrick J. Wong 		bno = frag->rm.rm_startblock;
187dbde19daSDarrick J. Wong 	}
188dbde19daSDarrick J. Wong 
189dbde19daSDarrick J. Wong 	/*
190dbde19daSDarrick J. Wong 	 * Find all the rmaps that start at or before the refc extent,
191dbde19daSDarrick J. Wong 	 * and put them on the worklist.
192dbde19daSDarrick J. Wong 	 */
19354e9b09eSDarrick J. Wong 	nr = 0;
194dbde19daSDarrick J. Wong 	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
19554e9b09eSDarrick J. Wong 		if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
19654e9b09eSDarrick J. Wong 			break;
197dbde19daSDarrick J. Wong 		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
198dbde19daSDarrick J. Wong 		if (bno < rbno)
199dbde19daSDarrick J. Wong 			rbno = bno;
200dbde19daSDarrick J. Wong 		list_move_tail(&frag->list, &worklist);
201dbde19daSDarrick J. Wong 		nr++;
202dbde19daSDarrick J. Wong 	}
203dbde19daSDarrick J. Wong 
204dbde19daSDarrick J. Wong 	/*
205dbde19daSDarrick J. Wong 	 * We should have found exactly $target_nr rmap fragments starting
206dbde19daSDarrick J. Wong 	 * at or before the refcount extent.
207dbde19daSDarrick J. Wong 	 */
208dbde19daSDarrick J. Wong 	if (nr != target_nr)
209dbde19daSDarrick J. Wong 		goto done;
210dbde19daSDarrick J. Wong 
211dbde19daSDarrick J. Wong 	while (!list_empty(&refchk->fragments)) {
212dbde19daSDarrick J. Wong 		/* Discard any fragments ending at rbno from the worklist. */
213dbde19daSDarrick J. Wong 		nr = 0;
214dbde19daSDarrick J. Wong 		next_rbno = NULLAGBLOCK;
215dbde19daSDarrick J. Wong 		list_for_each_entry_safe(frag, n, &worklist, list) {
216dbde19daSDarrick J. Wong 			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
217dbde19daSDarrick J. Wong 			if (bno != rbno) {
218dbde19daSDarrick J. Wong 				if (bno < next_rbno)
219dbde19daSDarrick J. Wong 					next_rbno = bno;
220dbde19daSDarrick J. Wong 				continue;
221dbde19daSDarrick J. Wong 			}
222dbde19daSDarrick J. Wong 			list_del(&frag->list);
223306195f3SDarrick J. Wong 			kfree(frag);
224dbde19daSDarrick J. Wong 			nr++;
225dbde19daSDarrick J. Wong 		}
226dbde19daSDarrick J. Wong 
227dbde19daSDarrick J. Wong 		/* Try to add nr rmaps starting at rbno to the worklist. */
228dbde19daSDarrick J. Wong 		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
229dbde19daSDarrick J. Wong 			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
230dbde19daSDarrick J. Wong 			if (frag->rm.rm_startblock != rbno)
231dbde19daSDarrick J. Wong 				goto done;
232dbde19daSDarrick J. Wong 			list_move_tail(&frag->list, &worklist);
233dbde19daSDarrick J. Wong 			if (next_rbno > bno)
234dbde19daSDarrick J. Wong 				next_rbno = bno;
235dbde19daSDarrick J. Wong 			nr--;
236dbde19daSDarrick J. Wong 			if (nr == 0)
237dbde19daSDarrick J. Wong 				break;
238dbde19daSDarrick J. Wong 		}
239dbde19daSDarrick J. Wong 
240dbde19daSDarrick J. Wong 		/*
241dbde19daSDarrick J. Wong 		 * If we get here and nr > 0, this means that we added fewer
242dbde19daSDarrick J. Wong 		 * items to the worklist than we discarded because the fragment
243dbde19daSDarrick J. Wong 		 * list ran out of items.  Therefore, we cannot maintain the
244dbde19daSDarrick J. Wong 		 * required refcount.  Something is wrong, so we're done.
245dbde19daSDarrick J. Wong 		 */
246dbde19daSDarrick J. Wong 		if (nr)
247dbde19daSDarrick J. Wong 			goto done;
248dbde19daSDarrick J. Wong 
249dbde19daSDarrick J. Wong 		rbno = next_rbno;
250dbde19daSDarrick J. Wong 	}
251dbde19daSDarrick J. Wong 
252dbde19daSDarrick J. Wong 	/*
253dbde19daSDarrick J. Wong 	 * Make sure the last extent we processed ends at or beyond
254dbde19daSDarrick J. Wong 	 * the end of the refcount extent.
255dbde19daSDarrick J. Wong 	 */
256dbde19daSDarrick J. Wong 	if (rbno < refchk->bno + refchk->len)
257dbde19daSDarrick J. Wong 		goto done;
258dbde19daSDarrick J. Wong 
259dbde19daSDarrick J. Wong 	/* Actually record us having seen the remaining refcount. */
260dbde19daSDarrick J. Wong 	refchk->seen = refchk->refcount;
261dbde19daSDarrick J. Wong done:
262dbde19daSDarrick J. Wong 	/* Delete fragments and work list. */
263dbde19daSDarrick J. Wong 	list_for_each_entry_safe(frag, n, &worklist, list) {
264dbde19daSDarrick J. Wong 		list_del(&frag->list);
265306195f3SDarrick J. Wong 		kfree(frag);
266dbde19daSDarrick J. Wong 	}
267dbde19daSDarrick J. Wong 	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
268dbde19daSDarrick J. Wong 		list_del(&frag->list);
269306195f3SDarrick J. Wong 		kfree(frag);
270dbde19daSDarrick J. Wong 	}
271dbde19daSDarrick J. Wong }
272dbde19daSDarrick J. Wong 
273dbde19daSDarrick J. Wong /* Use the rmap entries covering this extent to verify the refcount. */
274dbde19daSDarrick J. Wong STATIC void
275c517b3aaSDarrick J. Wong xchk_refcountbt_xref_rmap(
2761d8a748aSDarrick J. Wong 	struct xfs_scrub		*sc,
2775a8c345cSDarrick J. Wong 	const struct xfs_refcount_irec	*irec)
278dbde19daSDarrick J. Wong {
279c517b3aaSDarrick J. Wong 	struct xchk_refcnt_check	refchk = {
280dbde19daSDarrick J. Wong 		.sc			= sc,
2815a8c345cSDarrick J. Wong 		.bno			= irec->rc_startblock,
2825a8c345cSDarrick J. Wong 		.len			= irec->rc_blockcount,
2835a8c345cSDarrick J. Wong 		.refcount		= irec->rc_refcount,
284dbde19daSDarrick J. Wong 		.seen = 0,
285dbde19daSDarrick J. Wong 	};
286dbde19daSDarrick J. Wong 	struct xfs_rmap_irec		low;
287dbde19daSDarrick J. Wong 	struct xfs_rmap_irec		high;
288c517b3aaSDarrick J. Wong 	struct xchk_refcnt_frag		*frag;
289c517b3aaSDarrick J. Wong 	struct xchk_refcnt_frag		*n;
290dbde19daSDarrick J. Wong 	int				error;
291dbde19daSDarrick J. Wong 
292c517b3aaSDarrick J. Wong 	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
293dbde19daSDarrick J. Wong 		return;
294dbde19daSDarrick J. Wong 
295dbde19daSDarrick J. Wong 	/* Cross-reference with the rmapbt to confirm the refcount. */
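	/*
	 * Wildcard every rmap key field except the start block: low is
	 * zeroed and high is filled with ones so that the range query spans
	 * the entire refcount extent.
	 */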
296dbde19daSDarrick J. Wong 	memset(&low, 0, sizeof(low));
2975a8c345cSDarrick J. Wong 	low.rm_startblock = irec->rc_startblock;
298dbde19daSDarrick J. Wong 	memset(&high, 0xFF, sizeof(high));
2995a8c345cSDarrick J. Wong 	high.rm_startblock = irec->rc_startblock + irec->rc_blockcount - 1;
300dbde19daSDarrick J. Wong 
301dbde19daSDarrick J. Wong 	INIT_LIST_HEAD(&refchk.fragments);
302dbde19daSDarrick J. Wong 	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
303c517b3aaSDarrick J. Wong 			&xchk_refcountbt_rmap_check, &refchk);
304c517b3aaSDarrick J. Wong 	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
305dbde19daSDarrick J. Wong 		goto out_free;
306dbde19daSDarrick J. Wong 
307c517b3aaSDarrick J. Wong 	xchk_refcountbt_process_rmap_fragments(&refchk);
30890148903SDarrick J. Wong 	if (irec->rc_refcount != refchk.seen) {
30990148903SDarrick J. Wong 		trace_xchk_refcount_incorrect(sc->sa.pag, irec, refchk.seen);
310c517b3aaSDarrick J. Wong 		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
31190148903SDarrick J. Wong 	}
312dbde19daSDarrick J. Wong 
313dbde19daSDarrick J. Wong out_free:
314dbde19daSDarrick J. Wong 	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
315dbde19daSDarrick J. Wong 		list_del(&frag->list);
316306195f3SDarrick J. Wong 		kfree(frag);
317dbde19daSDarrick J. Wong 	}
318dbde19daSDarrick J. Wong }
319dbde19daSDarrick J. Wong 
320166d7641SDarrick J. Wong /* Cross-reference with the other btrees. */
321166d7641SDarrick J. Wong STATIC void
322c517b3aaSDarrick J. Wong xchk_refcountbt_xref(
3231d8a748aSDarrick J. Wong 	struct xfs_scrub		*sc,
3245a8c345cSDarrick J. Wong 	const struct xfs_refcount_irec	*irec)
325166d7641SDarrick J. Wong {
326166d7641SDarrick J. Wong 	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
327166d7641SDarrick J. Wong 		return;
32852dc4b44SDarrick J. Wong 
3295a8c345cSDarrick J. Wong 	xchk_xref_is_used_space(sc, irec->rc_startblock, irec->rc_blockcount);
3305a8c345cSDarrick J. Wong 	xchk_xref_is_not_inode_chunk(sc, irec->rc_startblock,
3315a8c345cSDarrick J. Wong 			irec->rc_blockcount);
3325a8c345cSDarrick J. Wong 	xchk_refcountbt_xref_rmap(sc, irec);
333166d7641SDarrick J. Wong }
334166d7641SDarrick J. Wong 
3357ad9ea63SDarrick J. Wong struct xchk_refcbt_records {
336*db0502b3SDarrick J. Wong 	/* Previous refcount record. */
337*db0502b3SDarrick J. Wong 	struct xfs_refcount_irec prev_rec;
338*db0502b3SDarrick J. Wong 
3397ad9ea63SDarrick J. Wong 	/* The next AG block where we aren't expecting shared extents. */
3407ad9ea63SDarrick J. Wong 	xfs_agblock_t		next_unshared_agbno;
3417ad9ea63SDarrick J. Wong 
3427ad9ea63SDarrick J. Wong 	/* Number of CoW blocks we expect. */
3437ad9ea63SDarrick J. Wong 	xfs_agblock_t		cow_blocks;
3447ad9ea63SDarrick J. Wong 
3457ad9ea63SDarrick J. Wong 	/* Was the last record a shared or CoW staging extent? */
3467ad9ea63SDarrick J. Wong 	enum xfs_refc_domain	prev_domain;
3477ad9ea63SDarrick J. Wong };
3487ad9ea63SDarrick J. Wong 
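/*
 * Callback for the rmapbt range query below: return -ECANCELED if this
 * record starts before the end of the previous record, since overlapping
 * rmaps imply that the blocks are shared by more than one owner.
 */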
3497ad9ea63SDarrick J. Wong STATIC int
3507ad9ea63SDarrick J. Wong xchk_refcountbt_rmap_check_gap(
3517ad9ea63SDarrick J. Wong 	struct xfs_btree_cur		*cur,
3527ad9ea63SDarrick J. Wong 	const struct xfs_rmap_irec	*rec,
3537ad9ea63SDarrick J. Wong 	void				*priv)
3547ad9ea63SDarrick J. Wong {
3557ad9ea63SDarrick J. Wong 	xfs_agblock_t			*next_bno = priv;
3567ad9ea63SDarrick J. Wong 
3577ad9ea63SDarrick J. Wong 	if (*next_bno != NULLAGBLOCK && rec->rm_startblock < *next_bno)
3587ad9ea63SDarrick J. Wong 		return -ECANCELED;
3597ad9ea63SDarrick J. Wong 
3607ad9ea63SDarrick J. Wong 	*next_bno = rec->rm_startblock + rec->rm_blockcount;
3617ad9ea63SDarrick J. Wong 	return 0;
3627ad9ea63SDarrick J. Wong }
3637ad9ea63SDarrick J. Wong 
3647ad9ea63SDarrick J. Wong /*
3657ad9ea63SDarrick J. Wong  * Make sure that a gap in the reference count records does not correspond to
3667ad9ea63SDarrick J. Wong  * overlapping records (i.e. shared extents) in the reverse mappings.
3677ad9ea63SDarrick J. Wong  */
3687ad9ea63SDarrick J. Wong static inline void
3697ad9ea63SDarrick J. Wong xchk_refcountbt_xref_gaps(
3707ad9ea63SDarrick J. Wong 	struct xfs_scrub	*sc,
3717ad9ea63SDarrick J. Wong 	struct xchk_refcbt_records *rrc,
3727ad9ea63SDarrick J. Wong 	xfs_agblock_t		bno)
3737ad9ea63SDarrick J. Wong {
3747ad9ea63SDarrick J. Wong 	struct xfs_rmap_irec	low;
3757ad9ea63SDarrick J. Wong 	struct xfs_rmap_irec	high;
3767ad9ea63SDarrick J. Wong 	xfs_agblock_t		next_bno = NULLAGBLOCK;
3777ad9ea63SDarrick J. Wong 	int			error;
3787ad9ea63SDarrick J. Wong 
3797ad9ea63SDarrick J. Wong 	if (bno <= rrc->next_unshared_agbno || !sc->sa.rmap_cur ||
3807ad9ea63SDarrick J. Wong 	    xchk_skip_xref(sc->sm))
3817ad9ea63SDarrick J. Wong 		return;
3827ad9ea63SDarrick J. Wong 
3837ad9ea63SDarrick J. Wong 	memset(&low, 0, sizeof(low));
3847ad9ea63SDarrick J. Wong 	low.rm_startblock = rrc->next_unshared_agbno;
3857ad9ea63SDarrick J. Wong 	memset(&high, 0xFF, sizeof(high));
3867ad9ea63SDarrick J. Wong 	high.rm_startblock = bno - 1;
3877ad9ea63SDarrick J. Wong 
3887ad9ea63SDarrick J. Wong 	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
3897ad9ea63SDarrick J. Wong 			xchk_refcountbt_rmap_check_gap, &next_bno);
3907ad9ea63SDarrick J. Wong 	if (error == -ECANCELED)
3917ad9ea63SDarrick J. Wong 		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
3927ad9ea63SDarrick J. Wong 	else
3937ad9ea63SDarrick J. Wong 		xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur);
3947ad9ea63SDarrick J. Wong }
3957ad9ea63SDarrick J. Wong 
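/*
 * Decide if this record and the previous record could have been expressed
 * as a single record: same domain, same refcount, physically contiguous,
 * and a combined length no greater than MAXREFCEXTLEN.
 */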
396*db0502b3SDarrick J. Wong static inline bool
397*db0502b3SDarrick J. Wong xchk_refcount_mergeable(
398*db0502b3SDarrick J. Wong 	struct xchk_refcbt_records	*rrc,
399*db0502b3SDarrick J. Wong 	const struct xfs_refcount_irec	*r2)
400*db0502b3SDarrick J. Wong {
401*db0502b3SDarrick J. Wong 	const struct xfs_refcount_irec	*r1 = &rrc->prev_rec;
402*db0502b3SDarrick J. Wong 
403*db0502b3SDarrick J. Wong 	/* Ignore if prev_rec is not yet initialized. */
404*db0502b3SDarrick J. Wong 	if (r1->rc_blockcount == 0)
405*db0502b3SDarrick J. Wong 		return false;
406*db0502b3SDarrick J. Wong 
407*db0502b3SDarrick J. Wong 	if (r1->rc_domain != r2->rc_domain)
408*db0502b3SDarrick J. Wong 		return false;
409*db0502b3SDarrick J. Wong 	if (r1->rc_startblock + r1->rc_blockcount != r2->rc_startblock)
410*db0502b3SDarrick J. Wong 		return false;
411*db0502b3SDarrick J. Wong 	if (r1->rc_refcount != r2->rc_refcount)
412*db0502b3SDarrick J. Wong 		return false;
413*db0502b3SDarrick J. Wong 	if ((unsigned long long)r1->rc_blockcount + r2->rc_blockcount >
414*db0502b3SDarrick J. Wong 			MAXREFCEXTLEN)
415*db0502b3SDarrick J. Wong 		return false;
416*db0502b3SDarrick J. Wong 
417*db0502b3SDarrick J. Wong 	return true;
418*db0502b3SDarrick J. Wong }
419*db0502b3SDarrick J. Wong 
420*db0502b3SDarrick J. Wong /* Flag failures for records that could be merged. */
421*db0502b3SDarrick J. Wong STATIC void
422*db0502b3SDarrick J. Wong xchk_refcountbt_check_mergeable(
423*db0502b3SDarrick J. Wong 	struct xchk_btree		*bs,
424*db0502b3SDarrick J. Wong 	struct xchk_refcbt_records	*rrc,
425*db0502b3SDarrick J. Wong 	const struct xfs_refcount_irec	*irec)
426*db0502b3SDarrick J. Wong {
427*db0502b3SDarrick J. Wong 	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
428*db0502b3SDarrick J. Wong 		return;
429*db0502b3SDarrick J. Wong 
430*db0502b3SDarrick J. Wong 	if (xchk_refcount_mergeable(rrc, irec))
431*db0502b3SDarrick J. Wong 		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
432*db0502b3SDarrick J. Wong 
433*db0502b3SDarrick J. Wong 	memcpy(&rrc->prev_rec, irec, sizeof(struct xfs_refcount_irec));
434*db0502b3SDarrick J. Wong }
435*db0502b3SDarrick J. Wong 
436edc09b52SDarrick J. Wong /* Scrub a refcountbt record. */
437edc09b52SDarrick J. Wong STATIC int
438c517b3aaSDarrick J. Wong xchk_refcountbt_rec(
439c517b3aaSDarrick J. Wong 	struct xchk_btree	*bs,
44022ece4e8SDarrick J. Wong 	const union xfs_btree_rec *rec)
441edc09b52SDarrick J. Wong {
4425a8c345cSDarrick J. Wong 	struct xfs_refcount_irec irec;
4437ad9ea63SDarrick J. Wong 	struct xchk_refcbt_records *rrc = bs->private;
444edc09b52SDarrick J. Wong 
4455a8c345cSDarrick J. Wong 	xfs_refcount_btrec_to_irec(rec, &irec);
4462b30cc0bSDarrick J. Wong 	if (xfs_refcount_check_irec(bs->cur, &irec) != NULL) {
447c517b3aaSDarrick J. Wong 		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
4482b30cc0bSDarrick J. Wong 		return 0;
4492b30cc0bSDarrick J. Wong 	}
450f492135dSDarrick J. Wong 
451f492135dSDarrick J. Wong 	if (irec.rc_domain == XFS_REFC_DOMAIN_COW)
4527ad9ea63SDarrick J. Wong 		rrc->cow_blocks += irec.rc_blockcount;
4537ad9ea63SDarrick J. Wong 
4547ad9ea63SDarrick J. Wong 	/* Shared records always come before CoW records. */
4557ad9ea63SDarrick J. Wong 	if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED &&
4567ad9ea63SDarrick J. Wong 	    rrc->prev_domain == XFS_REFC_DOMAIN_COW)
4577ad9ea63SDarrick J. Wong 		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
4587ad9ea63SDarrick J. Wong 	rrc->prev_domain = irec.rc_domain;
459edc09b52SDarrick J. Wong 
460*db0502b3SDarrick J. Wong 	xchk_refcountbt_check_mergeable(bs, rrc, &irec);
4615a8c345cSDarrick J. Wong 	xchk_refcountbt_xref(bs->sc, &irec);
462166d7641SDarrick J. Wong 
4637ad9ea63SDarrick J. Wong 	/*
4647ad9ea63SDarrick J. Wong 	 * If this is a record for a shared extent, check that all blocks
4657ad9ea63SDarrick J. Wong 	 * between the previous record and this one have at most one reverse
4667ad9ea63SDarrick J. Wong 	 * mapping.
4677ad9ea63SDarrick J. Wong 	 */
4687ad9ea63SDarrick J. Wong 	if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED) {
4697ad9ea63SDarrick J. Wong 		xchk_refcountbt_xref_gaps(bs->sc, rrc, irec.rc_startblock);
4707ad9ea63SDarrick J. Wong 		rrc->next_unshared_agbno = irec.rc_startblock +
4717ad9ea63SDarrick J. Wong 					   irec.rc_blockcount;
4727ad9ea63SDarrick J. Wong 	}
4737ad9ea63SDarrick J. Wong 
474d5cc14d9SAliasgar Surti 	return 0;
475edc09b52SDarrick J. Wong }
476edc09b52SDarrick J. Wong 
477dbde19daSDarrick J. Wong /* Make sure we have as many refc blocks as the rmap says. */
478dbde19daSDarrick J. Wong STATIC void
479c517b3aaSDarrick J. Wong xchk_refcount_xref_rmap(
4801d8a748aSDarrick J. Wong 	struct xfs_scrub	*sc,
481dbde19daSDarrick J. Wong 	xfs_filblks_t		cow_blocks)
482dbde19daSDarrick J. Wong {
483dbde19daSDarrick J. Wong 	xfs_extlen_t		refcbt_blocks = 0;
484dbde19daSDarrick J. Wong 	xfs_filblks_t		blocks;
485dbde19daSDarrick J. Wong 	int			error;
486dbde19daSDarrick J. Wong 
487c517b3aaSDarrick J. Wong 	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
488dbde19daSDarrick J. Wong 		return;
489dbde19daSDarrick J. Wong 
490dbde19daSDarrick J. Wong 	/* Check that we saw as many refcbt blocks as the rmap knows about. */
491dbde19daSDarrick J. Wong 	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
492c517b3aaSDarrick J. Wong 	if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
493dbde19daSDarrick J. Wong 		return;
4947280fedaSDarrick J. Wong 	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
4957280fedaSDarrick J. Wong 			&XFS_RMAP_OINFO_REFC, &blocks);
496c517b3aaSDarrick J. Wong 	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
497dbde19daSDarrick J. Wong 		return;
498dbde19daSDarrick J. Wong 	if (blocks != refcbt_blocks)
499c517b3aaSDarrick J. Wong 		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
500dbde19daSDarrick J. Wong 
501dbde19daSDarrick J. Wong 	/* Check that we saw as many cow blocks as the rmap knows about. */
5027280fedaSDarrick J. Wong 	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
5037280fedaSDarrick J. Wong 			&XFS_RMAP_OINFO_COW, &blocks);
504c517b3aaSDarrick J. Wong 	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
505dbde19daSDarrick J. Wong 		return;
506dbde19daSDarrick J. Wong 	if (blocks != cow_blocks)
507c517b3aaSDarrick J. Wong 		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
508dbde19daSDarrick J. Wong }
509dbde19daSDarrick J. Wong 
510edc09b52SDarrick J. Wong /* Scrub the refcount btree for some AG. */
511edc09b52SDarrick J. Wong int
512c517b3aaSDarrick J. Wong xchk_refcountbt(
5131d8a748aSDarrick J. Wong 	struct xfs_scrub	*sc)
514edc09b52SDarrick J. Wong {
5157ad9ea63SDarrick J. Wong 	struct xchk_refcbt_records rrc = {
5167ad9ea63SDarrick J. Wong 		.cow_blocks		= 0,
5177ad9ea63SDarrick J. Wong 		.next_unshared_agbno	= 0,
5187ad9ea63SDarrick J. Wong 		.prev_domain		= XFS_REFC_DOMAIN_SHARED,
5197ad9ea63SDarrick J. Wong 	};
520dbde19daSDarrick J. Wong 	int			error;
521edc09b52SDarrick J. Wong 
522c517b3aaSDarrick J. Wong 	error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
5237ad9ea63SDarrick J. Wong 			&XFS_RMAP_OINFO_REFC, &rrc);
524dbde19daSDarrick J. Wong 	if (error)
525dbde19daSDarrick J. Wong 		return error;
526dbde19daSDarrick J. Wong 
5277ad9ea63SDarrick J. Wong 	/*
5287ad9ea63SDarrick J. Wong 	 * Check that all blocks between the last refcount > 1 record and the
5297ad9ea63SDarrick J. Wong 	 * end of the AG have at most one reverse mapping.
5307ad9ea63SDarrick J. Wong 	 */
5317ad9ea63SDarrick J. Wong 	xchk_refcountbt_xref_gaps(sc, &rrc, sc->mp->m_sb.sb_agblocks);
5327ad9ea63SDarrick J. Wong 
5337ad9ea63SDarrick J. Wong 	xchk_refcount_xref_rmap(sc, rrc.cow_blocks);
534dbde19daSDarrick J. Wong 
535dbde19daSDarrick J. Wong 	return 0;
536edc09b52SDarrick J. Wong }
537f6d5fc21SDarrick J. Wong 
538f6d5fc21SDarrick J. Wong /* xref check that a cow staging extent is marked in the refcountbt. */
539f6d5fc21SDarrick J. Wong void
540c517b3aaSDarrick J. Wong xchk_xref_is_cow_staging(
5411d8a748aSDarrick J. Wong 	struct xfs_scrub		*sc,
542f6d5fc21SDarrick J. Wong 	xfs_agblock_t			agbno,
543f6d5fc21SDarrick J. Wong 	xfs_extlen_t			len)
544f6d5fc21SDarrick J. Wong {
545f6d5fc21SDarrick J. Wong 	struct xfs_refcount_irec	rc;
546f6d5fc21SDarrick J. Wong 	int				has_refcount;
547f6d5fc21SDarrick J. Wong 	int				error;
548f6d5fc21SDarrick J. Wong 
549c517b3aaSDarrick J. Wong 	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
550f6d5fc21SDarrick J. Wong 		return;
551f6d5fc21SDarrick J. Wong 
552f6d5fc21SDarrick J. Wong 	/* Find the CoW staging extent. */
5539a50ee4fSDarrick J. Wong 	error = xfs_refcount_lookup_le(sc->sa.refc_cur, XFS_REFC_DOMAIN_COW,
5549a50ee4fSDarrick J. Wong 			agbno, &has_refcount);
555c517b3aaSDarrick J. Wong 	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
556f6d5fc21SDarrick J. Wong 		return;
557f6d5fc21SDarrick J. Wong 	if (!has_refcount) {
558c517b3aaSDarrick J. Wong 		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
559f6d5fc21SDarrick J. Wong 		return;
560f6d5fc21SDarrick J. Wong 	}
561f6d5fc21SDarrick J. Wong 
562f6d5fc21SDarrick J. Wong 	error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
563c517b3aaSDarrick J. Wong 	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
564f6d5fc21SDarrick J. Wong 		return;
565f6d5fc21SDarrick J. Wong 	if (!has_refcount) {
566c517b3aaSDarrick J. Wong 		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
567f6d5fc21SDarrick J. Wong 		return;
568f6d5fc21SDarrick J. Wong 	}
569f6d5fc21SDarrick J. Wong 
570f62ac3e0SDarrick J. Wong 	/* CoW lookup returned a shared extent record? */
571f62ac3e0SDarrick J. Wong 	if (rc.rc_domain != XFS_REFC_DOMAIN_COW)
572c517b3aaSDarrick J. Wong 		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
573f6d5fc21SDarrick J. Wong 
574f6d5fc21SDarrick J. Wong 	/* Must be at least as long as what was passed in */
575f6d5fc21SDarrick J. Wong 	if (rc.rc_blockcount < len)
576c517b3aaSDarrick J. Wong 		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
577f6d5fc21SDarrick J. Wong }
578f6d5fc21SDarrick J. Wong 
579f6d5fc21SDarrick J. Wong /*
580f6d5fc21SDarrick J. Wong  * xref check that the extent is not shared.  Only file data blocks
581f6d5fc21SDarrick J. Wong  * can have multiple owners.
582f6d5fc21SDarrick J. Wong  */
583f6d5fc21SDarrick J. Wong void
584c517b3aaSDarrick J. Wong xchk_xref_is_not_shared(
5851d8a748aSDarrick J. Wong 	struct xfs_scrub	*sc,
586f6d5fc21SDarrick J. Wong 	xfs_agblock_t		agbno,
587f6d5fc21SDarrick J. Wong 	xfs_extlen_t		len)
588f6d5fc21SDarrick J. Wong {
5896abc7aefSDarrick J. Wong 	enum xbtree_recpacking	outcome;
590f6d5fc21SDarrick J. Wong 	int			error;
591f6d5fc21SDarrick J. Wong 
592c517b3aaSDarrick J. Wong 	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
593f6d5fc21SDarrick J. Wong 		return;
594f6d5fc21SDarrick J. Wong 
5956abc7aefSDarrick J. Wong 	error = xfs_refcount_has_records(sc->sa.refc_cur,
5966abc7aefSDarrick J. Wong 			XFS_REFC_DOMAIN_SHARED, agbno, len, &outcome);
597c517b3aaSDarrick J. Wong 	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
598f6d5fc21SDarrick J. Wong 		return;
5996abc7aefSDarrick J. Wong 	if (outcome != XBTREE_RECPACKING_EMPTY)
600c517b3aaSDarrick J. Wong 		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
601f6d5fc21SDarrick J. Wong }
6027ac14fa2SDarrick J. Wong 
6037ac14fa2SDarrick J. Wong /* xref check that the extent is not being used for CoW staging. */
6047ac14fa2SDarrick J. Wong void
6057ac14fa2SDarrick J. Wong xchk_xref_is_not_cow_staging(
6067ac14fa2SDarrick J. Wong 	struct xfs_scrub	*sc,
6077ac14fa2SDarrick J. Wong 	xfs_agblock_t		agbno,
6087ac14fa2SDarrick J. Wong 	xfs_extlen_t		len)
6097ac14fa2SDarrick J. Wong {
6107ac14fa2SDarrick J. Wong 	enum xbtree_recpacking	outcome;
6117ac14fa2SDarrick J. Wong 	int			error;
6127ac14fa2SDarrick J. Wong 
6137ac14fa2SDarrick J. Wong 	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
6147ac14fa2SDarrick J. Wong 		return;
6157ac14fa2SDarrick J. Wong 
6167ac14fa2SDarrick J. Wong 	error = xfs_refcount_has_records(sc->sa.refc_cur, XFS_REFC_DOMAIN_COW,
6177ac14fa2SDarrick J. Wong 			agbno, len, &outcome);
6187ac14fa2SDarrick J. Wong 	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
6197ac14fa2SDarrick J. Wong 		return;
6207ac14fa2SDarrick J. Wong 	if (outcome != XBTREE_RECPACKING_EMPTY)
6217ac14fa2SDarrick J. Wong 		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
6227ac14fa2SDarrick J. Wong }
623