// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_health.h"
#include "xfs_btree.h"
#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

/*
 * FS Summary Counters
 * ===================
 *
 * The basics of filesystem summary counter checking are that we iterate the
 * AGs counting the number of free blocks, free space btree blocks, per-AG
 * reservations, inodes, delayed allocation reservations, and free inodes.
 * Then we compare what we computed against the in-core counters.
 *
 * However, the reality is that summary counters are a tricky beast to check.
 * While we /could/ freeze the filesystem and scramble around the AGs counting
 * the free blocks, in practice we prefer not to do that for a scan because
 * freezing is costly. To get around this, we added a per-cpu counter of the
 * delalloc reservations so that we can rotor around the AGs relatively
 * quickly, and we allow the counts to be slightly off because we're not taking
 * any locks while we do this.
 *
 * So the first thing we do is warm up the buffer cache in the setup routine by
 * walking all the AGs to make sure the incore per-AG structure has been
 * initialized. The expected value calculation then iterates the incore per-AG
 * structures as quickly as it can. We snapshot the percpu counters before and
 * after this operation and use the difference in counter values to guess at
 * our tolerance for mismatch between expected and actual counter values.
 */

/*
 * Since the expected value computation is lockless but only browses incore
 * values, the percpu counters should be fairly close to each other. However,
 * we'll allow ourselves to be off by at least this (arbitrary) amount.
 */
#define XCHK_FSCOUNT_MIN_VARIANCE	(512)
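
/*
 * Illustration (made-up numbers): if the percpu free block summation was
 * 1000 before the expected value calculation and 1200 afterwards, then any
 * expected value within [1000, 1200] is accepted as a match.  If the two
 * summations drift apart by XCHK_FSCOUNT_MIN_VARIANCE (512) or more, we
 * assume the filesystem is too busy for a stable reading and mark the scrub
 * incomplete instead of flagging corruption; see xchk_fscount_within_range()
 * below.
 */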

/*
 * Make sure the per-AG structure has been initialized from the on-disk header
 * contents and trust that the incore counters match the ondisk counters. (The
 * AGF and AGI scrubbers check them, and a normal xfs_scrub run checks the
 * summary counters after checking all AG headers). Do this from the setup
 * function so that the inner AG aggregation loop runs as quickly as possible.
 *
 * This function runs during the setup phase /before/ we start checking any
 * metadata.
 */
STATIC int
xchk_fscount_warmup(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*agi_bp = NULL;
	struct xfs_buf		*agf_bp = NULL;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno;
	int			error = 0;

	for_each_perag(mp, agno, pag) {
		if (xchk_should_terminate(sc, &error))
			break;
		if (pag->pagi_init && pag->pagf_init)
			continue;

		/* Lock both AG headers. */
		error = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp);
		if (error)
			break;
		error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
		if (error)
			break;

		/*
		 * These are supposed to be initialized by the header read
		 * function.
		 */
		if (!pag->pagi_init || !pag->pagf_init) {
			error = -EFSCORRUPTED;
			break;
		}

		xfs_buf_relse(agf_bp);
		agf_bp = NULL;
		xfs_buf_relse(agi_bp);
		agi_bp = NULL;
	}

	if (agf_bp)
		xfs_buf_relse(agf_bp);
	if (agi_bp)
		xfs_buf_relse(agi_bp);
	if (pag)
		xfs_perag_put(pag);
	return error;
}

int
xchk_setup_fscounters(
	struct xfs_scrub	*sc)
{
	struct xchk_fscounters	*fsc;
	int			error;

	sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0);
	if (!sc->buf)
		return -ENOMEM;
	fsc = sc->buf;

	xfs_icount_range(sc->mp, &fsc->icount_min, &fsc->icount_max);

	/* We must get the incore counters set up before we can proceed. */
	error = xchk_fscount_warmup(sc);
	if (error)
		return error;

	/*
	 * Pause background reclaim while we're scrubbing to reduce the
	 * likelihood of background perturbations to the counters throwing off
	 * our calculations.
	 */
	xchk_stop_reaping(sc);

	return xchk_trans_alloc(sc, 0);
}

/* Count free space btree blocks manually for pre-lazysbcount filesystems. */
static int
xchk_fscount_btreeblks(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc,
	xfs_agnumber_t		agno)
{
	xfs_extlen_t		blocks;
	int			error;

	error = xchk_ag_init_existing(sc, agno, &sc->sa);
	if (error)
		goto out_free;

	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (error)
		goto out_free;
	fsc->fdblocks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (error)
		goto out_free;
	fsc->fdblocks += blocks - 1;

out_free:
	xchk_ag_free(sc, &sc->sa);
	return error;
}
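
/*
 * For reference, the expected free block count assembled below reduces to
 * (summing the per-AG terms over all AGs):
 *
 *	fdblocks = sum(pagf_freeblks + pagf_flcount + free space btree blocks)
 *		 - sum(per-AG metadata and rmapbt reservations)
 *		 - m_resblks_avail
 *		 - delalloc reservations
 *
 * On lazysbcount filesystems the btree term comes from pagf_btreeblks;
 * otherwise we count the bnobt/cntbt blocks directly via
 * xchk_fscount_btreeblks() above.
 */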

/*
 * Calculate what the global in-core counters ought to be from the incore
 * per-AG structure. Callers can compare this to the actual in-core counters
 * to estimate by how much both in-core and on-disk counters need to be
 * adjusted.
 */
STATIC int
xchk_fscount_aggregate_agcounts(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag;
	uint64_t		delayed;
	xfs_agnumber_t		agno;
	int			tries = 8;
	int			error = 0;

retry:
	fsc->icount = 0;
	fsc->ifree = 0;
	fsc->fdblocks = 0;

	for_each_perag(mp, agno, pag) {
		if (xchk_should_terminate(sc, &error))
			break;

		/* This somehow got unset since the warmup? */
		if (!pag->pagi_init || !pag->pagf_init) {
			error = -EFSCORRUPTED;
			break;
		}

		/* Count all the inodes */
		fsc->icount += pag->pagi_count;
		fsc->ifree += pag->pagi_freecount;

		/* Add up the free/freelist/bnobt/cntbt blocks */
		fsc->fdblocks += pag->pagf_freeblks;
		fsc->fdblocks += pag->pagf_flcount;
		if (xfs_has_lazysbcount(sc->mp)) {
			fsc->fdblocks += pag->pagf_btreeblks;
		} else {
			error = xchk_fscount_btreeblks(sc, fsc, agno);
			if (error)
				break;
		}

		/*
		 * Per-AG reservations are taken out of the incore counters,
		 * so they must be left out of the free blocks computation.
		 */
		fsc->fdblocks -= pag->pag_meta_resv.ar_reserved;
		fsc->fdblocks -= pag->pag_rmapbt_resv.ar_orig_reserved;
	}
	if (pag)
		xfs_perag_put(pag);
	if (error)
		return error;

	/*
	 * The global incore space reservation is taken from the incore
	 * counters, so leave that out of the computation.
	 */
	fsc->fdblocks -= mp->m_resblks_avail;

	/*
	 * Delayed allocation reservations are taken out of the incore counters
	 * but not recorded on disk, so leave them and their indlen blocks out
	 * of the computation.
	 */
	delayed = percpu_counter_sum(&mp->m_delalloc_blks);
	fsc->fdblocks -= delayed;

	trace_xchk_fscounters_calc(mp, fsc->icount, fsc->ifree, fsc->fdblocks,
			delayed);

	/* Bail out if the values we compute are totally nonsense. */
	if (fsc->icount < fsc->icount_min || fsc->icount > fsc->icount_max ||
	    fsc->fdblocks > mp->m_sb.sb_dblocks ||
	    fsc->ifree > fsc->icount_max)
		return -EFSCORRUPTED;

	/*
	 * If ifree > icount then we probably had some perturbation in the
	 * counters while we were calculating things. We'll try a few times
	 * to maintain ifree <= icount before giving up.
	 */
	if (fsc->ifree > fsc->icount) {
		if (tries--)
			goto retry;
		xchk_set_incomplete(sc);
		return 0;
	}

	return 0;
}

/*
 * Is the @counter reasonably close to the @expected value?
 *
 * We neither locked nor froze anything in the filesystem while aggregating the
 * per-AG data to compute the @expected value, which means that the counter
 * could have changed. We know the @old_value of the summation of the counter
 * before the aggregation, and we re-sum the counter now. If the expected
 * value falls between the two summations, we're ok.
 *
 * Otherwise, we /might/ have a problem. If the change in the summations is
 * more than we want to tolerate, the filesystem is probably busy and we should
 * just send back INCOMPLETE and see if userspace will try again.
 */
static inline bool
xchk_fscount_within_range(
	struct xfs_scrub	*sc,
	const int64_t		old_value,
	struct percpu_counter	*counter,
	uint64_t		expected)
{
	int64_t			min_value, max_value;
	int64_t			curr_value = percpu_counter_sum(counter);

	trace_xchk_fscounters_within_range(sc->mp, expected, curr_value,
			old_value);

	/* Negative values are always wrong. */
	if (curr_value < 0)
		return false;

	/* Exact matches are always ok. */
	if (curr_value == expected)
		return true;

	min_value = min(old_value, curr_value);
	max_value = max(old_value, curr_value);

	/* Within the before-and-after range is ok. */
	if (expected >= min_value && expected <= max_value)
		return true;

	/*
	 * If the difference between the two summations is too large, the fs
	 * might just be busy and so we'll mark the scrub incomplete. Return
	 * true here so that we don't mark the counter corrupt.
	 *
	 * XXX: In the future when userspace can grant scrub permission to
	 * quiesce the filesystem to solve the outsized variance problem, this
	 * check should be moved up and the return code changed to signal to
	 * userspace that we need quiesce permission.
	 */
	if (max_value - min_value >= XCHK_FSCOUNT_MIN_VARIANCE) {
		xchk_set_incomplete(sc);
		return true;
	}

	return false;
}
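
/*
 * A minimal sketch of how userspace reaches this checker (hypothetical
 * caller, not part of this file):
 *
 *	struct xfs_scrub_metadata sm = {
 *		.sm_type = XFS_SCRUB_TYPE_FSCOUNTERS,
 *	};
 *
 *	if (ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm))
 *		err(1, "scrub fscounters");
 *	if (sm.sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
 *		...the fs was too busy for a stable reading; try again...
 *
 * The INCOMPLETE disposition set by the helpers above is what such a caller
 * would observe when the counters cannot be sampled stably.
 */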

/* Check the superblock counters. */
int
xchk_fscounters(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xchk_fscounters	*fsc = sc->buf;
	int64_t			icount, ifree, fdblocks;
	int			error;

	/* Snapshot the percpu counters. */
	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	/* No negative values, please! */
	if (icount < 0 || ifree < 0 || fdblocks < 0)
		xchk_set_corrupt(sc);

	/* See if icount is obviously wrong. */
	if (icount < fsc->icount_min || icount > fsc->icount_max)
		xchk_set_corrupt(sc);

	/* See if fdblocks is obviously wrong. */
	if (fdblocks > mp->m_sb.sb_dblocks)
		xchk_set_corrupt(sc);

	/*
	 * If ifree exceeds icount by more than the minimum variance then
	 * something's probably wrong with the counters.
	 */
	if (ifree > icount && ifree - icount > XCHK_FSCOUNT_MIN_VARIANCE)
		xchk_set_corrupt(sc);

	/* Walk the incore AG headers to calculate the expected counters. */
	error = xchk_fscount_aggregate_agcounts(sc, fsc);
	if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error))
		return error;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		return 0;

	/* Compare the in-core counters with whatever we counted. */
	if (!xchk_fscount_within_range(sc, icount, &mp->m_icount, fsc->icount))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks,
			fsc->fdblocks))
		xchk_set_corrupt(sc);

	return 0;
}