fscounters.c: diff between version 11f97e684583469fc342a561387cc44fac4f9b1f (old) and version e74331d6fa2c21a8ecccfe0648dad5193b83defe (new). Lines added in the new version are prefixed with "+" and replaced old lines with "-"; unprefixed lines are common to both versions, and "--- N unchanged lines hidden ---" marks collapsed unchanged regions.
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_health.h"
#include "xfs_btree.h"
#include "xfs_ag.h"
+#include "xfs_rtalloc.h"
+#include "xfs_inode.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

/*
 * FS Summary Counters
 * ===================
 *

--- 13 unchanged lines hidden ---

 * So the first thing we do is warm up the buffer cache in the setup routine by
 * walking all the AGs to make sure the incore per-AG structure has been
 * initialized. The expected value calculation then iterates the incore per-AG
 * structures as quickly as it can. We snapshot the percpu counters before and
 * after this operation and use the difference in counter values to guess at
 * our tolerance for mismatch between expected and actual counter values.
 */

struct xchk_fscounters {
+	struct xfs_scrub	*sc;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
+	uint64_t		frextents;
	unsigned long long	icount_min;
	unsigned long long	icount_max;
};

/*
 * Since the expected value computation is lockless but only browses incore
 * values, the percpu counters should be fairly close to each other. However,
 * we'll allow ourselves to be off by at least this (arbitrary) amount.
 */
#define XCHK_FSCOUNT_MIN_VARIANCE	(512)

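Aside (not part of fscounters.c): the comments above describe the tolerance scheme — snapshot the live counter before and after the lockless walk, then accept a mismatch no larger than the drift observed during the walk plus the minimum variance. Below is a minimal user-space sketch of that arithmetic; every name and number in it is hypothetical, and the kernel's xchk_fscount_within_range() is the real implementation.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_VARIANCE	512	/* counterpart of XCHK_FSCOUNT_MIN_VARIANCE */

/*
 * Decide whether @expected, computed without locks, is close enough to the
 * live counter.  @before and @after are samples of the live counter taken
 * just before and just after the computation; any drift seen during the
 * walk widens the tolerance.
 */
static bool
within_range(uint64_t before, uint64_t after, uint64_t expected)
{
	uint64_t drift = (after > before) ? after - before : before - after;
	uint64_t tolerance = drift + MIN_VARIANCE;
	uint64_t diff = (expected > after) ? expected - after : after - expected;

	return diff <= tolerance;
}

int main(void)
{
	/* Hypothetical numbers: the counter moved by 40 while we walked. */
	uint64_t before = 100000, after = 100040, expected = 100300;

	printf("expected %" PRIu64 " vs live %" PRIu64 ": %s\n",
	       expected, after,
	       within_range(before, after, expected) ? "close enough" : "suspect");
	return 0;
}
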
/*

--- 61 unchanged lines hidden ---

{
	struct xchk_fscounters	*fsc;
	int			error;

	sc->buf = kzalloc(sizeof(struct xchk_fscounters), XCHK_GFP_FLAGS);
	if (!sc->buf)
		return -ENOMEM;
	fsc = sc->buf;
+	fsc->sc = sc;

	xfs_icount_range(sc->mp, &fsc->icount_min, &fsc->icount_max);

	/* We must get the incore counters set up before we can proceed. */
	error = xchk_fscount_warmup(sc);
	if (error)
		return error;

--- 145 unchanged lines hidden ---

			goto retry;
		xchk_set_incomplete(sc);
		return 0;
	}

	return 0;
}

+#ifdef CONFIG_XFS_RT
+STATIC int
+xchk_fscount_add_frextent(
+	struct xfs_mount		*mp,
+	struct xfs_trans		*tp,
+	const struct xfs_rtalloc_rec	*rec,
+	void				*priv)
+{
+	struct xchk_fscounters		*fsc = priv;
+	int				error = 0;
+
+	fsc->frextents += rec->ar_extcount;
+
+	xchk_should_terminate(fsc->sc, &error);
+	return error;
+}
+
+/* Calculate the number of free realtime extents from the realtime bitmap. */
+STATIC int
+xchk_fscount_count_frextents(
+	struct xfs_scrub	*sc,
+	struct xchk_fscounters	*fsc)
+{
+	struct xfs_mount	*mp = sc->mp;
+	int			error;
+
+	fsc->frextents = 0;
+	if (!xfs_has_realtime(mp))
+		return 0;
+
+	xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
+	error = xfs_rtalloc_query_all(sc->mp, sc->tp,
+			xchk_fscount_add_frextent, fsc);
+	if (error) {
+		xchk_set_incomplete(sc);
+		goto out_unlock;
+	}
+
+out_unlock:
+	xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
+	return error;
+}
+#else
+STATIC int
+xchk_fscount_count_frextents(
+	struct xfs_scrub	*sc,
+	struct xchk_fscounters	*fsc)
+{
+	fsc->frextents = 0;
+	return 0;
+}
+#endif	/* CONFIG_XFS_RT */
+
/*
 * Part 2: Comparing filesystem summary counters. All we have to do here is
 * sum the percpu counters and compare them to what we've observed.
 */

/*
 * Is the @counter reasonably close to the @expected value?
 *

--- 55 unchanged lines hidden ---

/* Check the superblock counters. */
int
xchk_fscounters(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xchk_fscounters	*fsc = sc->buf;
-	int64_t			icount, ifree, fdblocks;
+	int64_t			icount, ifree, fdblocks, frextents;
	int			error;

	/* Snapshot the percpu counters. */
	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
+	frextents = percpu_counter_sum(&mp->m_frextents);

	/* No negative values, please! */
-	if (icount < 0 || ifree < 0 || fdblocks < 0)
+	if (icount < 0 || ifree < 0 || fdblocks < 0 || frextents < 0)
		xchk_set_corrupt(sc);

	/* See if icount is obviously wrong. */
	if (icount < fsc->icount_min || icount > fsc->icount_max)
		xchk_set_corrupt(sc);

	/* See if fdblocks is obviously wrong. */
	if (fdblocks > mp->m_sb.sb_dblocks)
		xchk_set_corrupt(sc);

+	/* See if frextents is obviously wrong. */
+	if (frextents > mp->m_sb.sb_rextents)
+		xchk_set_corrupt(sc);
+
	/*
	 * If ifree exceeds icount by more than the minimum variance then
	 * something's probably wrong with the counters.
	 */
	if (ifree > icount && ifree - icount > XCHK_FSCOUNT_MIN_VARIANCE)
		xchk_set_corrupt(sc);

	/* Walk the incore AG headers to calculate the expected counters. */
	error = xchk_fscount_aggregate_agcounts(sc, fsc);
	if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error))
		return error;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		return 0;

+	/* Count the free extents counter for rt volumes. */
+	error = xchk_fscount_count_frextents(sc, fsc);
+	if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error))
+		return error;
+	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
+		return 0;
+
	/* Compare the in-core counters with whatever we counted. */
	if (!xchk_fscount_within_range(sc, icount, &mp->m_icount, fsc->icount))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks,
			fsc->fdblocks))
		xchk_set_corrupt(sc);

+	if (!xchk_fscount_within_range(sc, frextents, &mp->m_frextents,
+			fsc->frextents))
+		xchk_set_corrupt(sc);
+
	return 0;
}
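
Aside (not part of the diff): both the old and the new checks are driven from userspace through the XFS_IOC_SCRUB_METADATA ioctl with scrub type XFS_SCRUB_TYPE_FSCOUNTERS. A rough sketch of such a caller follows; it assumes the xfsprogs development headers provide <xfs/xfs.h> and that the kernel was built with online scrub support, and it only inspects the corruption flag.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xfs/xfs.h>	/* XFS_IOC_SCRUB_METADATA, struct xfs_scrub_metadata */

int
main(int argc, char **argv)
{
	struct xfs_scrub_metadata	sm;
	int				fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <xfs mount point>\n", argv[0]);
		return 1;
	}

	/* Any fd on the filesystem will do; scrub needs CAP_SYS_ADMIN. */
	fd = open(argv[1], O_RDONLY | O_DIRECTORY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&sm, 0, sizeof(sm));
	sm.sm_type = XFS_SCRUB_TYPE_FSCOUNTERS;	/* whole-fs summary counters */

	if (ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm) < 0)
		perror("XFS_IOC_SCRUB_METADATA");
	else if (sm.sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		printf("summary counters: corrupt\n");
	else
		printf("summary counters: ok\n");

	close(fd);
	return 0;
}

The xfs_io and xfs_scrub tools in xfsprogs wrap the same ioctl, so scripted checks normally go through those rather than open-coding the call.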