/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/*
 * Set us up to scrub reference count btrees.
 */
int
xfs_scrub_setup_ag_refcountbt(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip)
{
	return xfs_scrub_setup_ag_btree(sc, ip, false);
}

/* Reference count btree scrubber. */

/*
 * Confirming Reference Counts via Reverse Mappings
 *
 * We want to count the reverse mappings overlapping a refcount record
 * (bno, len, refcount), allowing for the possibility that some of the
 * overlap may come from smaller adjoining reverse mappings, while some
 * comes from single extents which overlap the range entirely.  The
 * outer loop is as follows:
 *
 * 1. For all reverse mappings overlapping the refcount extent,
 *    a. If a given rmap completely overlaps, mark it as seen.
 *    b. Otherwise, record the fragment (in agbno order) for later
 *       processing.
 *
 * Once we've seen all the rmaps, we know that for all blocks in the
 * refcount record we want to find $refcount owners and we've already
 * visited $seen extents that overlap all the blocks.  Therefore, we
 * need to find ($refcount - $seen) owners for every block in the
 * extent; call that quantity $target_nr.  Proceed as follows:
 *
 * 2. Pull the first $target_nr fragments from the list; all of them
 *    should start at or before the start of the extent.
 *    Call this subset of fragments the working set.
 * 3. Until there are no more unprocessed fragments,
 *    a. Find the shortest fragments in the set and remove them.
 *    b. Note the block number of the end of these fragments.
 *    c. Pull the same number of fragments from the list.  All of these
 *       fragments should start at the block number recorded in the
 *       previous step.
 *    d. Put those fragments in the set.
 * 4. Check that there are $target_nr fragments remaining in the list,
 *    and that they all end at or beyond the end of the refcount extent.
 *
 * If the refcount is correct, all the check conditions in the algorithm
 * should always hold true.  If not, the refcount is incorrect.
 */
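/*
 * Illustrative walk-through (the numbers here are invented for this
 * comment, not taken from the original code): consider a refcount
 * record (bno = 10, len = 10, refcount = 3) with reverse mappings
 * [5, 20), [8, 25), [10, 15), and [15, 22).  Step 1 counts [5, 20)
 * and [8, 25) as whole owners ($seen = 2), so $target_nr = 1.
 * Step 2 pulls [10, 15) into the working set; step 3 retires it at
 * agbno 15 and replaces it with [15, 22), which starts exactly where
 * it ended.  The last fragment ends at 22 >= 20, so every block in
 * [10, 20) has exactly three owners and the record verifies.
 */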
struct xfs_scrub_refcnt_frag {
	struct list_head		list;
	struct xfs_rmap_irec		rm;
};

struct xfs_scrub_refcnt_check {
	struct xfs_scrub_context	*sc;
	struct list_head		fragments;

	/* refcount extent we're examining */
	xfs_agblock_t			bno;
	xfs_extlen_t			len;
	xfs_nlink_t			refcount;

	/* number of owners seen */
	xfs_nlink_t			seen;
};
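/*
 * The fragment list above is the only dynamic allocation in this
 * scrubber.  Entries are allocated KM_MAYFAIL | KM_NOFS in the rmap
 * callback below; the NOFS part is presumably because scrub runs with
 * a transaction and AG headers held, so letting the allocator recurse
 * into filesystem reclaim would risk deadlock.
 */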
/*
 * Decide if the given rmap is large enough that we can redeem it
 * towards refcount verification now, or if it's a fragment, in
 * which case we'll hang onto it in the hopes that we'll later
 * discover that we've collected exactly the correct number of
 * fragments as the refcountbt says we should have.
 */
STATIC int
xfs_scrub_refcountbt_rmap_check(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_scrub_refcnt_check	*refchk = priv;
	struct xfs_scrub_refcnt_frag	*frag;
	xfs_agblock_t			rm_last;
	xfs_agblock_t			rc_last;
	int				error = 0;

	if (xfs_scrub_should_terminate(refchk->sc, &error))
		return error;

	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
	rc_last = refchk->bno + refchk->len - 1;

	/* Confirm that a single-owner refc extent is a CoW stage. */
	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
		xfs_scrub_btree_xref_set_corrupt(refchk->sc, cur, 0);
		return 0;
	}

	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
		/*
		 * The rmap overlaps the refcount record, so we can confirm
		 * one refcount owner seen.
		 */
		refchk->seen++;
	} else {
		/*
		 * This rmap covers only part of the refcount record, so
		 * save the fragment for later processing.  If the rmapbt
		 * is healthy each rmap_irec we see will be in agbno order
		 * so we don't need insertion sort here.
		 */
		frag = kmem_alloc(sizeof(struct xfs_scrub_refcnt_frag),
				KM_MAYFAIL | KM_NOFS);
		if (!frag)
			return -ENOMEM;
		memcpy(&frag->rm, rec, sizeof(frag->rm));
		list_add_tail(&frag->list, &refchk->fragments);
	}

	return 0;
}
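/*
 * Shape of the overlap test above, in picture form (illustrative
 * only):
 *
 *     refcount extent:          [bno ........... rc_last]
 *     whole owner:          [rm_startblock ............ rm_last]
 *     fragment:                 [rm_startblock .. rm_last]
 *
 * A mapping is credited to ->seen only if rm_startblock <= bno and
 * rm_last >= rc_last; anything that falls short on either side goes
 * on the fragment list for the worklist pass below.
 */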
/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount.  If this ever deviates from
 * what we expect (which is the refcountbt's refcount minus the
 * number of extents that totally covered the refcountbt extent),
 * we have a refcountbt error.
 */
STATIC void
xfs_scrub_refcountbt_process_rmap_fragments(
	struct xfs_scrub_refcnt_check	*refchk)
{
	struct list_head		worklist;
	struct xfs_scrub_refcnt_frag	*frag;
	struct xfs_scrub_refcnt_frag	*n;
	xfs_agblock_t			bno;
	xfs_agblock_t			rbno;
	xfs_agblock_t			next_rbno;
	xfs_nlink_t			nr;
	xfs_nlink_t			target_nr;

	target_nr = refchk->refcount - refchk->seen;
	if (target_nr == 0)
		return;

	/*
	 * There are (refchk->refcount - refchk->seen) references we
	 * haven't found yet.  Pull that many off the fragment list and
	 * figure out where the smallest rmap ends (and therefore the
	 * next rmap should start).  All the rmaps we pull off should
	 * start at or before the beginning of the refcount record's
	 * range.
	 */
	INIT_LIST_HEAD(&worklist);
	rbno = NULLAGBLOCK;
	nr = 1;

	/* Make sure the fragments actually /are/ in agbno order. */
	bno = 0;
	list_for_each_entry(frag, &refchk->fragments, list) {
		if (frag->rm.rm_startblock < bno)
			goto done;
		bno = frag->rm.rm_startblock;
	}

	/*
	 * Find all the rmaps that start at or before the refc extent,
	 * and put them on the worklist.
	 */
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		if (frag->rm.rm_startblock > refchk->bno)
			goto done;
		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
		if (bno < rbno)
			rbno = bno;
		list_move_tail(&frag->list, &worklist);
		if (nr == target_nr)
			break;
		nr++;
	}

	/*
	 * We should have found exactly $target_nr rmap fragments starting
	 * at or before the refcount extent.
	 */
	if (nr != target_nr)
		goto done;

	while (!list_empty(&refchk->fragments)) {
		/* Discard any fragments ending at rbno from the worklist. */
		nr = 0;
		next_rbno = NULLAGBLOCK;
		list_for_each_entry_safe(frag, n, &worklist, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (bno != rbno) {
				if (bno < next_rbno)
					next_rbno = bno;
				continue;
			}
			list_del(&frag->list);
			kmem_free(frag);
			nr++;
		}

		/* Try to add nr rmaps starting at rbno to the worklist. */
		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (frag->rm.rm_startblock != rbno)
				goto done;
			list_move_tail(&frag->list, &worklist);
			if (next_rbno > bno)
				next_rbno = bno;
			nr--;
			if (nr == 0)
				break;
		}

		/*
		 * If we get here and nr > 0, this means that we added fewer
		 * items to the worklist than we discarded because the fragment
		 * list ran out of items.  Therefore, we cannot maintain the
		 * required refcount.  Something is wrong, so we're done.
		 */
		if (nr)
			goto done;

		rbno = next_rbno;
	}

	/*
	 * Make sure the last extent we processed ends at or beyond
	 * the end of the refcount extent.
	 */
	if (rbno < refchk->bno + refchk->len)
		goto done;

	/* Actually record us having seen the remaining refcount. */
	refchk->seen = refchk->refcount;
done:
	/* Delete fragments and work list. */
	list_for_each_entry_safe(frag, n, &worklist, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}
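/*
 * Note that the function above reports failure only through
 * refchk->seen: every inconsistency bails out through "done" without
 * executing the seen = refcount assignment, and the caller compares
 * seen against the record's refcount to decide whether to flag
 * corruption.
 */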
/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xfs_scrub_refcountbt_xref_rmap(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	xfs_nlink_t			refcount)
{
	struct xfs_scrub_refcnt_check	refchk = {
		.sc = sc,
		.bno = bno,
		.len = len,
		.refcount = refcount,
		.seen = 0,
	};
	struct xfs_rmap_irec		low;
	struct xfs_rmap_irec		high;
	struct xfs_scrub_refcnt_frag	*frag;
	struct xfs_scrub_refcnt_frag	*n;
	int				error;

	if (!sc->sa.rmap_cur)
		return;

	/* Cross-reference with the rmapbt to confirm the refcount. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno + len - 1;

	INIT_LIST_HEAD(&refchk.fragments);
	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			&xfs_scrub_refcountbt_rmap_check, &refchk);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		goto out_free;

	xfs_scrub_refcountbt_process_rmap_fragments(&refchk);
	if (refcount != refchk.seen)
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

out_free:
	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}
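/*
 * A note on the range query above: the rmap btree is an overlapped
 * interval btree, so the query returns every mapping that overlaps
 * [bno, bno + len - 1], including mappings that begin before bno.
 * That is what allows whole-extent owners starting to the left of the
 * refcount record to reach the callback at all.
 */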
/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_refcountbt_xref(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len,
	xfs_nlink_t			refcount)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, len);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
	xfs_scrub_refcountbt_xref_rmap(sc, agbno, len, refcount);
}

/* Scrub a refcountbt record. */
STATIC int
xfs_scrub_refcountbt_rec(
	struct xfs_scrub_btree		*bs,
	union xfs_btree_rec		*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_agblock_t			*cow_blocks = bs->private;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			bno;
	xfs_extlen_t			len;
	xfs_nlink_t			refcount;
	bool				has_cowflag;
	int				error = 0;

	bno = be32_to_cpu(rec->refc.rc_startblock);
	len = be32_to_cpu(rec->refc.rc_blockcount);
	refcount = be32_to_cpu(rec->refc.rc_refcount);

	/* Only CoW records can have refcount == 1. */
	has_cowflag = (bno & XFS_REFC_COW_START);
	if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
	if (has_cowflag)
		(*cow_blocks) += len;

	/* Check the extent. */
	bno &= ~XFS_REFC_COW_START;
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (refcount == 0)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	xfs_scrub_refcountbt_xref(bs->sc, bno, len, refcount);

	return error;
}
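/*
 * Aside on the CoW encoding checked above (my reading of the on-disk
 * format, not part of the original commentary): XFS_REFC_COW_START is
 * the high bit of the 32-bit startblock, so a CoW staging record for
 * agbno 100 is stored as rc_startblock = (1U << 31) | 100 with
 * rc_refcount == 1; masking the flag off recovers the real agbno for
 * the range checks.
 */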
/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xfs_scrub_refcount_xref_rmap(
	struct xfs_scrub_context	*sc,
	struct xfs_owner_info		*oinfo,
	xfs_filblks_t			cow_blocks)
{
	xfs_extlen_t			refcbt_blocks = 0;
	xfs_filblks_t			blocks;
	int				error;

	if (!sc->sa.rmap_cur)
		return;

	/* Check that we saw as many refcbt blocks as the rmap knows about. */
	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
	if (!xfs_scrub_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
		return;
	error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
			&blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != refcbt_blocks)
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

	/* Check that we saw as many cow blocks as the rmap knows about. */
	xfs_rmap_ag_owner(oinfo, XFS_RMAP_OWN_COW);
	error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
			&blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != cow_blocks)
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
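/*
 * Beware that the helper above reuses the caller's owner info: it
 * arrives set to XFS_RMAP_OWN_REFC for the btree-block count and is
 * rewritten in place to XFS_RMAP_OWN_COW for the staging-extent count,
 * so *oinfo is not preserved across the call.
 */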
/* Scrub the refcount btree for some AG. */
int
xfs_scrub_refcountbt(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	xfs_agblock_t			cow_blocks = 0;
	int				error;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
	error = xfs_scrub_btree(sc, sc->sa.refc_cur, xfs_scrub_refcountbt_rec,
			&oinfo, &cow_blocks);
	if (error)
		return error;

	xfs_scrub_refcount_xref_rmap(sc, &oinfo, cow_blocks);

	return 0;
}