// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xchk_setup_ag_iallocbt(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	return xchk_setup_ag_btree(sc, ip, sc->try_harder);
}
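
/*
 * Note: passing sc->try_harder as the force_log argument means that a
 * rescan (after a freemask mismatch below returns -EDEADLOCK) forces the
 * log before the inode cluster buffers are re-read, so logged-but-unwritten
 * inode cores can't cause a false positive.  This describes the behavior of
 * xchk_setup_ag_btree() at this revision; see scrub/common.c.
 */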

/* Inode btree scrubber. */

struct xchk_iallocbt {
	/* Number of inodes we see while scanning inobt. */
	unsigned long long	inodes;
};
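
/*
 * The running count above is summed over every inobt record and then, for
 * the inobt only, compared against the number of inode-owned blocks that
 * the rmapbt reports; see xchk_iallocbt_xref_rmap_inodes() below.
 */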

/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is a finobt, make sure
 * it has a record for this chunk if and only if ir_freecount is nonzero.
 */
static inline void
xchk_iallocbt_chunk_xref_other(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino)
{
	struct xfs_btree_cur		**pcur;
	bool				has_irec;
	int				error;

	if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
		pcur = &sc->sa.ino_cur;
	else
		pcur = &sc->sa.fino_cur;
	if (!(*pcur))
		return;
	error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
	if (!xchk_should_check_xref(sc, &error, pcur))
		return;
	if (((irec->ir_freecount > 0 && !has_irec) ||
	     (irec->ir_freecount == 0 && has_irec)))
		xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_iallocbt_chunk_xref(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_iallocbt_chunk_xref_other(sc, irec, agino);
	xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, len);
}

/* Is this chunk worth checking? */
STATIC bool
xchk_iallocbt_chunk(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_extlen_t			len)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			bno;

	bno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

	return true;
}
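
/*
 * Note: "bno + len <= bno" above flags both a zero-length extent and
 * arithmetic wraparound of the agblock number, and the two
 * xfs_verify_agbno() calls confirm that the first and last blocks of the
 * chunk lie inside this AG.
 */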

/* Count the number of free inodes. */
static unsigned int
xchk_iallocbt_freecount(
	xfs_inofree_t			freemask)
{
	BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
	return hweight64(freemask);
}
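
/*
 * Worked example (illustration only): each bit in the 64-bit freemask
 * stands for one inode in the chunk, so a mask of 0x00000000000000ffULL
 * means the first eight inodes are free and hweight64() returns 8.
 */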

/* Check one inode's allocation state against its bit in ir_free. */
STATIC int
xchk_iallocbt_check_cluster_freemask(
	struct xchk_btree		*bs,
	xfs_ino_t			fsino,
	xfs_agino_t			chunkino,
	xfs_agino_t			clusterino,
	struct xfs_inobt_rec_incore	*irec,
	struct xfs_buf			*bp)
{
	struct xfs_dinode		*dip;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	bool				inode_is_free = false;
	bool				freemask_ok;
	bool				inuse;
	int				error = 0;

	if (xchk_should_terminate(bs->sc, &error))
		return error;

	dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize);
	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
	    (dip->di_version >= 3 &&
	     be64_to_cpu(dip->di_ino) != fsino + clusterino)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	if (irec->ir_free & XFS_INOBT_MASK(chunkino + clusterino))
		inode_is_free = true;
	error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp,
			fsino + clusterino, &inuse);
	if (error == -ENODATA) {
		/* Not cached, just read the disk buffer */
		freemask_ok = inode_is_free ^ !!(dip->di_mode);
		if (!bs->sc->try_harder && !freemask_ok)
			return -EDEADLOCK;
	} else if (error < 0) {
		/*
		 * Inode is only half assembled, or there was an IO error,
		 * or the verifier failed, so don't bother trying to check.
		 * The inode scrubber can deal with this.
		 */
		goto out;
	} else {
		/* Inode is all there. */
		freemask_ok = inode_is_free ^ inuse;
	}
	if (!freemask_ok)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
	return 0;
}
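
/*
 * Note on the -ENODATA branch above: when the inode isn't in the incore
 * cache we only have the on-disk di_mode to compare against the freemask
 * bit.  If the two disagree and try_harder isn't set yet, -EDEADLOCK is
 * returned so the scrub can be rerun with try_harder set, which (at this
 * revision) forces the log in xchk_setup_ag_iallocbt() before rechecking.
 */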

/* Make sure the free mask is consistent with what the inodes think. */
STATIC int
xchk_iallocbt_check_freemask(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_dinode		*dip;
	struct xfs_buf			*bp;
	xfs_ino_t			fsino;
	xfs_agino_t			nr_inodes;
	xfs_agino_t			agino;
	xfs_agino_t			chunkino;
	xfs_agino_t			clusterino;
	xfs_agblock_t			agbno;
	uint16_t			holemask;
	uint16_t			ir_holemask;
	int				error = 0;

	/* Make sure the freemask matches the inode records. */
	nr_inodes = mp->m_inodes_per_cluster;

	for (agino = irec->ir_startino;
	     agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
	     agino += mp->m_inodes_per_cluster) {
		fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
		chunkino = agino - irec->ir_startino;
		agbno = XFS_AGINO_TO_AGBNO(mp, agino);

		/* Compute the holemask for this cluster. */
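		/*
		 * Worked example (illustration only): each of the 16 holemask
		 * bits covers XFS_INODES_PER_HOLEMASK_BIT (64/16 = 4) inodes,
		 * so a cluster of 8 inodes starting at chunkino 16 sets bits
		 * 4 and 5 in the loop below.
		 */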
		for (clusterino = 0, holemask = 0; clusterino < nr_inodes;
		     clusterino += XFS_INODES_PER_HOLEMASK_BIT)
			holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
					XFS_INODES_PER_HOLEMASK_BIT);

		/* The whole cluster must be a hole or not a hole. */
		ir_holemask = (irec->ir_holemask & holemask);
		if (ir_holemask != holemask && ir_holemask != 0) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			continue;
		}

		/* If any part of this is a hole, skip it. */
		if (ir_holemask) {
			xchk_xref_is_not_owned_by(bs->sc, agbno,
					mp->m_blocks_per_cluster,
					&XFS_RMAP_OINFO_INODES);
			continue;
		}

		xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
				&XFS_RMAP_OINFO_INODES);

		/* Grab the inode cluster buffer. */
		imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno,
				agbno);
		imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
		imap.im_boffset = 0;

		error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
				&dip, &bp, 0, 0);
		if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0,
				&error))
			continue;

		/* Which inodes are free? */
		for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
			error = xchk_iallocbt_check_cluster_freemask(bs,
					fsino, chunkino, clusterino, irec, bp);
			if (error) {
				xfs_trans_brelse(bs->cur->bc_tp, bp);
				return error;
			}
		}

		xfs_trans_brelse(bs->cur->bc_tp, bp);
	}

	return error;
}

/* Scrub an inobt/finobt record. */
STATIC int
xchk_iallocbt_rec(
	struct xchk_btree		*bs,
	union xfs_btree_rec		*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_inobt_rec_incore	irec;
	uint64_t			holes;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agino_t			agino;
	xfs_agblock_t			agbno;
	xfs_extlen_t			len;
	int				holecount;
	int				i;
	int				error = 0;
	unsigned int			real_freecount;
	uint16_t			holemask;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);

	if (irec.ir_count > XFS_INODES_PER_CHUNK ||
	    irec.ir_freecount > XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	real_freecount = irec.ir_freecount +
			(XFS_INODES_PER_CHUNK - irec.ir_count);
	if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
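
	/*
	 * Worked example (illustration only): ir_free has a bit set for
	 * every inode that is either free or not physically allocated, so
	 * a sparse record with ir_count == 32 and ir_freecount == 5 should
	 * have 5 + (64 - 32) = 37 bits set in ir_free.
	 */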

	agino = irec.ir_startino;
	/* Record has to be properly aligned within the AG. */
	if (!xfs_verify_agino(mp, agno, agino) ||
	    !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	/* Make sure this record is aligned to cluster and inoalignment size. */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino);
	if ((agbno & (mp->m_cluster_align - 1)) ||
	    (agbno & (mp->m_blocks_per_cluster - 1)))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	iabt->inodes += irec.ir_count;

	/* Handle non-sparse inodes */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		len = XFS_B_TO_FSB(mp,
				XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
		if (irec.ir_count != XFS_INODES_PER_CHUNK)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

		if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			goto out;
		goto check_freemask;
	}

	/* Check each chunk of a sparse inode cluster. */
	holemask = irec.ir_holemask;
	holecount = 0;
	len = XFS_B_TO_FSB(mp,
			XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
	holes = ~xfs_inobt_irec_to_allocmask(&irec);
	if ((holes & irec.ir_free) != holes ||
	    irec.ir_freecount > irec.ir_count)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
		if (holemask & 1)
			holecount += XFS_INODES_PER_HOLEMASK_BIT;
		else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			break;
		holemask >>= 1;
		agino += XFS_INODES_PER_HOLEMASK_BIT;
	}

	if (holecount > XFS_INODES_PER_CHUNK ||
	    holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
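
	/*
	 * Note: every inode in the chunk is accounted for exactly once, so
	 * the inodes hidden by holemask bits (holecount) plus the inodes the
	 * record claims to own (ir_count) must add up to XFS_INODES_PER_CHUNK.
	 */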

check_freemask:
	error = xchk_iallocbt_check_freemask(bs, &irec);
	if (error)
		goto out;

out:
	return error;
}

/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
	struct xfs_scrub	*sc,
	int			which)
{
	xfs_filblks_t		blocks;
	xfs_extlen_t		inobt_blocks = 0;
	xfs_extlen_t		finobt_blocks = 0;
	int			error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) ||
	    xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inobt blocks as the rmap says. */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xchk_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xchk_process_error(sc, 0, 0, &error))
			return;
	}

	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INOBT, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inobt_blocks + finobt_blocks)
		xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}

/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xchk_iallocbt_xref_rmap_inodes(
	struct xfs_scrub	*sc,
	int			which,
	unsigned long long	inodes)
{
	xfs_filblks_t		blocks;
	xfs_filblks_t		inode_blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inode blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INODES, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
	if (blocks != inode_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
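
/*
 * Worked example (illustration only): with 512-byte inodes and 4096-byte
 * filesystem blocks, 6400 counted inodes convert to 6400 * 512 bytes =
 * 800 fsblocks, which must match the rmapbt total for
 * XFS_RMAP_OINFO_INODES.
 */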

/* Scrub the inode btrees for some AG. */
STATIC int
xchk_iallocbt(
	struct xfs_scrub	*sc,
	xfs_btnum_t		which)
{
	struct xfs_btree_cur	*cur;
	struct xchk_iallocbt	iabt = {
		.inodes		= 0,
	};
	int			error;

	cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
	error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
			&iabt);
	if (error)
		return error;

	xchk_iallocbt_xref_rmap_btreeblks(sc, which);

	/*
	 * If we're scrubbing the inode btree, inode_blocks is the number of
	 * blocks pointed to by all the inode chunk records.  Therefore, we
	 * should compare to the number of inode chunk blocks that the rmap
	 * knows about.  We can't do this for the finobt since it only points
	 * to inode chunks with free inodes.
	 */
	if (which == XFS_BTNUM_INO)
		xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);

	return error;
}

int
xchk_inobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_INO);
}

int
xchk_finobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}

/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xchk_xref_inode_check(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	struct xfs_btree_cur	**icur,
	bool			should_have_inodes)
{
	bool			has_inodes;
	int			error;

	if (!(*icur) || xchk_skip_xref(sc->sm))
		return;

	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
	if (!xchk_should_check_xref(sc, &error, icur))
		return;
	if (has_inodes != should_have_inodes)
		xchk_btree_xref_set_corrupt(sc, *icur, 0);
}

/* xref check that the extent is not covered by inodes */
void
xchk_xref_is_not_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}
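
/*
 * Note: the "is not" check above consults both the inobt and the finobt,
 * while the "is" check below only needs the inobt, since the finobt only
 * tracks chunks that still contain free inodes.
 */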

/* xref check that the extent is covered by inodes */
void
xchk_xref_is_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}