// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_health.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

/*
 * FS Summary Counters
 * ===================
 *
 * The basics of filesystem summary counter checking are that we iterate the
 * AGs counting the number of free blocks, free space btree blocks, per-AG
 * reservations, inodes, delayed allocation reservations, and free inodes.
 * Then we compare what we computed against the in-core counters.
 *
 * However, the reality is that summary counters are a tricky beast to check.
 * While we /could/ freeze the filesystem and scramble around the AGs counting
 * the free blocks, in practice we prefer not to do that for a scan because
 * freezing is costly. To get around this, we added a per-cpu counter of the
 * delalloc reservations so that we can rotor around the AGs relatively
 * quickly, and we allow the counts to be slightly off because we're not taking
 * any locks while we do this.
 *
 * So the first thing we do is warm up the buffer cache in the setup routine by
 * walking all the AGs to make sure the incore per-AG structure has been
 * initialized. The expected value calculation then iterates the incore per-AG
 * structures as quickly as it can. We snapshot the percpu counters before and
 * after this operation and use the difference in counter values to guess at
 * our tolerance for mismatch between expected and actual counter values.
 */

/*
 * Since the expected value computation is lockless but only browses incore
 * values, the percpu counters should be fairly close to each other. However,
 * we'll allow ourselves to be off by at least this (arbitrary) amount.
 */
#define XCHK_FSCOUNT_MIN_VARIANCE	(512)

/*
 * Make sure the per-AG structure has been initialized from the on-disk header
 * contents and trust that the incore counters match the ondisk counters. (The
 * AGF and AGI scrubbers check them, and a normal xfs_scrub run checks the
 * summary counters after checking all AG headers). Do this from the setup
 * function so that the inner AG aggregation loop runs as quickly as possible.
 *
 * This function runs during the setup phase /before/ we start checking any
 * metadata.
 */
STATIC int
xchk_fscount_warmup(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*agi_bp = NULL;
	struct xfs_buf		*agf_bp = NULL;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno;
	int			error = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_get(mp, agno);

		if (pag->pagi_init && pag->pagf_init)
			goto next_loop_perag;

		/* Lock both AG headers. */
		error = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp);
		if (error)
			break;
		error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
		if (error)
			break;
		error = -ENOMEM;
		if (!agf_bp || !agi_bp)
			break;

		/*
		 * These are supposed to be initialized by the header read
		 * function.
		 */
		error = -EFSCORRUPTED;
		if (!pag->pagi_init || !pag->pagf_init)
			break;

		xfs_buf_relse(agf_bp);
		agf_bp = NULL;
		xfs_buf_relse(agi_bp);
		agi_bp = NULL;
next_loop_perag:
		xfs_perag_put(pag);
		pag = NULL;
		error = 0;

		if (xchk_should_terminate(sc, &error))
			break;
	}

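	/* Release anything still held if we bailed out of the loop early. */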
	if (agf_bp)
		xfs_buf_relse(agf_bp);
	if (agi_bp)
		xfs_buf_relse(agi_bp);
	if (pag)
		xfs_perag_put(pag);
	return error;
}

int
xchk_setup_fscounters(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	struct xchk_fscounters	*fsc;
	int			error;

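	/* Allocate scratch space to hold the counter values we compute. */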
	sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0);
	if (!sc->buf)
		return -ENOMEM;
	fsc = sc->buf;

	xfs_icount_range(sc->mp, &fsc->icount_min, &fsc->icount_max);

	/* We must get the incore counters set up before we can proceed. */
	error = xchk_fscount_warmup(sc);
	if (error)
		return error;

	/*
	 * Pause background reclaim while we're scrubbing to reduce the
	 * likelihood of background perturbations to the counters throwing off
	 * our calculations.
	 */
	xchk_stop_reaping(sc);

	return xchk_trans_alloc(sc, 0);
}

/*
 * Calculate what the global in-core counters ought to be from the incore
 * per-AG structure. Callers can compare this to the actual in-core counters
 * to estimate by how much both in-core and on-disk counters need to be
 * adjusted.
 */
STATIC int
xchk_fscount_aggregate_agcounts(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag;
	uint64_t		delayed;
	xfs_agnumber_t		agno;
	int			tries = 8;
	int			error = 0;

retry:
	fsc->icount = 0;
	fsc->ifree = 0;
	fsc->fdblocks = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_get(mp, agno);

		/* This somehow got unset since the warmup? */
		if (!pag->pagi_init || !pag->pagf_init) {
			xfs_perag_put(pag);
			return -EFSCORRUPTED;
		}

		/* Count all the inodes */
		fsc->icount += pag->pagi_count;
		fsc->ifree += pag->pagi_freecount;

		/* Add up the free/freelist/bnobt/cntbt blocks */
		fsc->fdblocks += pag->pagf_freeblks;
		fsc->fdblocks += pag->pagf_flcount;
		fsc->fdblocks += pag->pagf_btreeblks;

		/*
		 * Per-AG reservations are taken out of the incore counters,
		 * so they must be left out of the free blocks computation.
		 */
		fsc->fdblocks -= pag->pag_meta_resv.ar_reserved;
		fsc->fdblocks -= pag->pag_rmapbt_resv.ar_orig_reserved;

		xfs_perag_put(pag);

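		/* Stop early if the scrub has been interrupted. */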
		if (xchk_should_terminate(sc, &error))
			break;
	}

	if (error)
		return error;

	/*
	 * The global incore space reservation is taken from the incore
	 * counters, so leave that out of the computation.
	 */
	fsc->fdblocks -= mp->m_resblks_avail;

	/*
	 * Delayed allocation reservations are taken out of the incore counters
	 * but not recorded on disk, so leave them and their indlen blocks out
	 * of the computation.
	 */
	delayed = percpu_counter_sum(&mp->m_delalloc_blks);
	fsc->fdblocks -= delayed;

	trace_xchk_fscounters_calc(mp, fsc->icount, fsc->ifree, fsc->fdblocks,
			delayed);

	/* Bail out if the values we compute are totally nonsense. */
	if (fsc->icount < fsc->icount_min || fsc->icount > fsc->icount_max ||
	    fsc->fdblocks > mp->m_sb.sb_dblocks ||
	    fsc->ifree > fsc->icount_max)
		return -EFSCORRUPTED;

	/*
	 * If ifree > icount then we probably had some perturbation in the
	 * counters while we were calculating things. We'll try a few times
	 * to maintain ifree <= icount before giving up.
	 */
	if (fsc->ifree > fsc->icount) {
		if (tries--)
			goto retry;
		xchk_set_incomplete(sc);
		return 0;
	}

	return 0;
}

/*
 * Is the @counter reasonably close to the @expected value?
 *
 * We neither locked nor froze anything in the filesystem while aggregating the
 * per-AG data to compute the @expected value, which means that the counter
 * could have changed. We know the @old_value of the summation of the counter
 * before the aggregation, and we re-sum the counter now. If the expected
 * value falls between the two summations, we're ok.
 *
 * Otherwise, we /might/ have a problem. If the change in the summations is
 * more than we want to tolerate, the filesystem is probably busy and we should
 * just send back INCOMPLETE and see if userspace will try again.
 */
static inline bool
xchk_fscount_within_range(
	struct xfs_scrub	*sc,
	const int64_t		old_value,
	struct percpu_counter	*counter,
	uint64_t		expected)
{
	int64_t			min_value, max_value;
	int64_t			curr_value = percpu_counter_sum(counter);

	trace_xchk_fscounters_within_range(sc->mp, expected, curr_value,
			old_value);

	/* Negative values are always wrong. */
	if (curr_value < 0)
		return false;

	/* Exact matches are always ok. */
	if (curr_value == expected)
		return true;

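	/* Compute the range spanned by the counter before and after aggregation. */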
	min_value = min(old_value, curr_value);
	max_value = max(old_value, curr_value);

	/* Within the before-and-after range is ok. */
	if (expected >= min_value && expected <= max_value)
		return true;

	/*
	 * If the difference between the two summations is too large, the fs
	 * might just be busy and so we'll mark the scrub incomplete. Return
	 * true here so that we don't mark the counter corrupt.
	 *
	 * XXX: In the future when userspace can grant scrub permission to
	 * quiesce the filesystem to solve the outsized variance problem, this
	 * check should be moved up and the return code changed to signal to
	 * userspace that we need quiesce permission.
	 */
	if (max_value - min_value >= XCHK_FSCOUNT_MIN_VARIANCE) {
		xchk_set_incomplete(sc);
		return true;
	}

	return false;
}

/* Check the superblock counters. */
int
xchk_fscounters(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xchk_fscounters	*fsc = sc->buf;
	int64_t			icount, ifree, fdblocks;
	int			error;

	/* Snapshot the percpu counters. */
	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	/* No negative values, please! */
	if (icount < 0 || ifree < 0 || fdblocks < 0)
		xchk_set_corrupt(sc);

	/* See if icount is obviously wrong. */
	if (icount < fsc->icount_min || icount > fsc->icount_max)
		xchk_set_corrupt(sc);

	/* See if fdblocks is obviously wrong. */
	if (fdblocks > mp->m_sb.sb_dblocks)
		xchk_set_corrupt(sc);

	/*
	 * If ifree exceeds icount by more than the minimum variance then
	 * something's probably wrong with the counters.
	 */
	if (ifree > icount && ifree - icount > XCHK_FSCOUNT_MIN_VARIANCE)
		xchk_set_corrupt(sc);

	/* Walk the incore AG headers to calculate the expected counters. */
	error = xchk_fscount_aggregate_agcounts(sc, fsc);
	if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error))
		return error;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		return 0;

	/* Compare the in-core counters with whatever we counted. */
	if (!xchk_fscount_within_range(sc, icount, &mp->m_icount, fsc->icount))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks,
			fsc->fdblocks))
		xchk_set_corrupt(sc);

	return 0;
}