// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "xfs_ag.h"

/*
 * Set us up to scrub reference count btrees.
 */
int
xchk_setup_ag_refcountbt(
	struct xfs_scrub	*sc)
{
	return xchk_setup_ag_btree(sc, false);
}

/* Reference count btree scrubber. */

/*
 * Confirming Reference Counts via Reverse Mappings
 *
 * We want to count the reverse mappings overlapping a refcount record
 * (bno, len, refcount), allowing for the possibility that some of the
 * overlap may come from smaller adjoining reverse mappings, while some
 * comes from single extents which overlap the range entirely.  The
 * outer loop is as follows:
 *
 * 1. For all reverse mappings overlapping the refcount extent,
 *    a. If a given rmap completely overlaps, mark it as seen.
 *    b. Otherwise, record the fragment (in agbno order) for later
 *       processing.
 *
 * Once we've seen all the rmaps, we know that for all blocks in the
 * refcount record we want to find $refcount owners and we've already
 * visited $seen extents that overlap all the blocks.  Therefore, we
 * need to find ($refcount - $seen) owners for every block in the
 * extent; call that quantity $target_nr.  Proceed as follows:
 *
 * 2. Pull the first $target_nr fragments from the list; all of them
 *    should start at or before the start of the extent.
 *    Call this subset of fragments the working set.
 * 3. Until there are no more unprocessed fragments,
 *    a. Find the shortest fragments in the set and remove them.
 *    b. Note the block number of the end of these fragments.
 *    c. Pull the same number of fragments from the list.  All of these
 *       fragments should start at the block number recorded in the
 *       previous step.
 *    d. Put those fragments in the set.
 * 4. Check that there are $target_nr fragments remaining in the list,
 *    and that they all end at or beyond the end of the refcount extent.
 *
 * If the refcount is correct, all the check conditions in the algorithm
 * should always hold true.  If not, the refcount is incorrect.
 */
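/*
 * A worked example with made-up numbers: take a refcount record
 * (bno 10, len 5, refcount 3) and the rmaps (10, 5), (10, 5), (10, 2),
 * and (12, 3).  The two (10, 5) rmaps each cover the whole record, so
 * $seen = 2 and $target_nr = 1.  Step 2 pulls (10, 2) into the working
 * set; it ends at agbno 12, so step 3 replaces it with (12, 3), which
 * ends at agbno 15.  That is the end of the refcount extent and no
 * fragments remain, so a refcount of 3 is confirmed.
 */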
struct xchk_refcnt_frag {
	struct list_head	list;
	struct xfs_rmap_irec	rm;
};

struct xchk_refcnt_check {
	struct xfs_scrub	*sc;
	struct list_head	fragments;

	/* refcount extent we're examining */
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;

	/* number of owners seen */
	xfs_nlink_t		seen;
};

/*
 * Decide if the given rmap is large enough that we can redeem it
 * towards refcount verification now, or if it's a fragment, in
 * which case we'll hang onto it in the hopes that we'll later
 * discover that we've collected exactly the correct number of
 * fragments as the refcountbt says we should have.
 */
STATIC int
xchk_refcountbt_rmap_check(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xchk_refcnt_check	*refchk = priv;
	struct xchk_refcnt_frag		*frag;
	xfs_agblock_t			rm_last;
	xfs_agblock_t			rc_last;
	int				error = 0;

	if (xchk_should_terminate(refchk->sc, &error))
		return error;

	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
	rc_last = refchk->bno + refchk->len - 1;

	/* Confirm that a single-owner refc extent is a CoW stage. */
	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
		xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
		return 0;
	}

	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
		/*
		 * The rmap overlaps the refcount record, so we can confirm
		 * one refcount owner seen.
		 */
		refchk->seen++;
	} else {
		/*
		 * This rmap covers only part of the refcount record, so
		 * save the fragment for later processing.  If the rmapbt
		 * is healthy each rmap_irec we see will be in agbno order
		 * so we don't need insertion sort here.
		 */
		frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
				KM_MAYFAIL);
		if (!frag)
			return -ENOMEM;
		memcpy(&frag->rm, rec, sizeof(frag->rm));
		list_add_tail(&frag->list, &refchk->fragments);
	}

	return 0;
}

/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount.  If this ever deviates from
 * what we expect (which is the refcountbt's refcount minus the
 * number of extents that totally covered the refcountbt extent),
 * we have a refcountbt error.
 */
STATIC void
xchk_refcountbt_process_rmap_fragments(
	struct xchk_refcnt_check	*refchk)
{
	struct list_head		worklist;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	xfs_agblock_t			bno;
	xfs_agblock_t			rbno;
	xfs_agblock_t			next_rbno;
	xfs_nlink_t			nr;
	xfs_nlink_t			target_nr;

	target_nr = refchk->refcount - refchk->seen;
	if (target_nr == 0)
		return;

	/*
	 * There are (refchk->refcount - refchk->seen) references we haven't
	 * found yet.  Pull that many off the fragment list and figure out
	 * where the smallest rmap ends (and therefore the next rmap should
	 * start).  All the rmaps we pull off should start at or before the
	 * beginning of the refcount record's range.
	 */
	INIT_LIST_HEAD(&worklist);
	rbno = NULLAGBLOCK;

	/* Make sure the fragments actually /are/ in agbno order. */
	bno = 0;
	list_for_each_entry(frag, &refchk->fragments, list) {
		if (frag->rm.rm_startblock < bno)
			goto done;
		bno = frag->rm.rm_startblock;
	}

	/*
	 * Find all the rmaps that start at or before the refc extent,
	 * and put them on the worklist.
	 */
	nr = 0;
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
			break;
		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
		if (bno < rbno)
			rbno = bno;
		list_move_tail(&frag->list, &worklist);
		nr++;
	}

	/*
	 * We should have found exactly $target_nr rmap fragments starting
	 * at or before the refcount extent.
	 */
	if (nr != target_nr)
		goto done;

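	/*
	 * Now walk the rest of the extent: this is step 3 of the algorithm
	 * described above.  Each pass discards the shortest fragments in
	 * the working set (the ones ending at rbno) and refills the set
	 * with fragments that start exactly where those ended.
	 */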
	while (!list_empty(&refchk->fragments)) {
		/* Discard any fragments ending at rbno from the worklist. */
		nr = 0;
		next_rbno = NULLAGBLOCK;
		list_for_each_entry_safe(frag, n, &worklist, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (bno != rbno) {
				if (bno < next_rbno)
					next_rbno = bno;
				continue;
			}
			list_del(&frag->list);
			kmem_free(frag);
			nr++;
		}

		/* Try to add nr rmaps starting at rbno to the worklist. */
		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (frag->rm.rm_startblock != rbno)
				goto done;
			list_move_tail(&frag->list, &worklist);
			if (next_rbno > bno)
				next_rbno = bno;
			nr--;
			if (nr == 0)
				break;
		}

		/*
		 * If we get here and nr > 0, this means that we added fewer
		 * items to the worklist than we discarded because the fragment
		 * list ran out of items.  Therefore, we cannot maintain the
		 * required refcount.  Something is wrong, so we're done.
		 */
		if (nr)
			goto done;

		rbno = next_rbno;
	}

	/*
	 * Make sure the last extent we processed ends at or beyond
	 * the end of the refcount extent.
	 */
	if (rbno < refchk->bno + refchk->len)
		goto done;

	/* Actually record us having seen the remaining refcount. */
	refchk->seen = refchk->refcount;
done:
	/* Delete fragments and work list. */
	list_for_each_entry_safe(frag, n, &worklist, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}

/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xchk_refcountbt_xref_rmap(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	xfs_nlink_t			refcount)
{
	struct xchk_refcnt_check	refchk = {
		.sc = sc,
		.bno = bno,
		.len = len,
		.refcount = refcount,
		.seen = 0,
	};
	struct xfs_rmap_irec		low;
	struct xfs_rmap_irec		high;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Cross-reference with the rmapbt to confirm the refcount. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = bno;
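	/*
	 * The all-ones high key is the largest possible rmap key at the
	 * last block of the record, so the range query below visits every
	 * reverse mapping that touches [bno, bno + len).
	 */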
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno + len - 1;

	INIT_LIST_HEAD(&refchk.fragments);
	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			&xchk_refcountbt_rmap_check, &refchk);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		goto out_free;

	xchk_refcountbt_process_rmap_fragments(&refchk);
	if (refcount != refchk.seen)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

out_free:
	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_refcountbt_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	xfs_nlink_t		refcount)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_xref_is_not_inode_chunk(sc, agbno, len);
	xchk_refcountbt_xref_rmap(sc, agbno, len, refcount);
}

/* Scrub a refcountbt record. */
STATIC int
xchk_refcountbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_mount	*mp = bs->cur->bc_mp;
	xfs_agblock_t		*cow_blocks = bs->private;
	xfs_agnumber_t		agno = bs->cur->bc_ag.pag->pag_agno;
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;
	bool			has_cowflag;

	bno = be32_to_cpu(rec->refc.rc_startblock);
	len = be32_to_cpu(rec->refc.rc_blockcount);
	refcount = be32_to_cpu(rec->refc.rc_refcount);

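	/*
	 * CoW staging extents are stored in the upper half of the refcount
	 * btree keyspace: on disk, their startblock carries the
	 * XFS_REFC_COW_START bit (the high bit of the agbno space) to keep
	 * them separate from records for shared extents.
	 */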
	/* Only CoW records can have refcount == 1. */
	has_cowflag = (bno & XFS_REFC_COW_START);
	if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	if (has_cowflag)
		(*cow_blocks) += len;

	/* Check the extent. */
	bno &= ~XFS_REFC_COW_START;
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (refcount == 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_refcountbt_xref(bs->sc, bno, len, refcount);

	return 0;
}

/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xchk_refcount_xref_rmap(
	struct xfs_scrub	*sc,
	xfs_filblks_t		cow_blocks)
{
	xfs_extlen_t		refcbt_blocks = 0;
	xfs_filblks_t		blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many refcbt blocks as the rmap knows about. */
	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
	if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
		return;
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_REFC, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != refcbt_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

	/* Check that we saw as many cow blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_COW, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != cow_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub the refcount btree for some AG. */
int
xchk_refcountbt(
	struct xfs_scrub	*sc)
{
	xfs_agblock_t		cow_blocks = 0;
	int			error;

	error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
			&XFS_RMAP_OINFO_REFC, &cow_blocks);
	if (error)
		return error;

	xchk_refcount_xref_rmap(sc, cow_blocks);

	return 0;
}

/* xref check that a cow staging extent is marked in the refcountbt. */
void
xchk_xref_is_cow_staging(
	struct xfs_scrub		*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	struct xfs_refcount_irec	rc;
	bool				has_cowflag;
	int				has_refcount;
	int				error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	/* Find the CoW staging extent. */
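	/*
	 * Staging extents are keyed with the XFS_REFC_COW_START bit set,
	 * so bias the lookup into the CoW half of the keyspace.
	 */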
	error = xfs_refcount_lookup_le(sc->sa.refc_cur,
			agbno + XFS_REFC_COW_START, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	/* CoW flag must be set, refcount must be 1. */
	has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
	if (!has_cowflag || rc.rc_refcount != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);

	/* Must be at least as long as what was passed in */
	if (rc.rc_blockcount < len)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/*
 * xref check that the extent is not shared.  Only file data blocks
 * can have multiple owners.
 */
void
xchk_xref_is_not_shared(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	bool			shared;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (shared)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}