// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_ag.h"
#include "xfs_bit.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/bitmap.h"

/*
 * Set us up to scrub reverse mapping btrees.
 */
int
xchk_setup_ag_rmapbt(
	struct xfs_scrub *sc)
{
	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	return xchk_setup_ag_btree(sc, false);
}

/* Reverse-mapping scrubber. */

struct xchk_rmap {
	/*
	 * The furthest-reaching of the rmapbt records that we've already
	 * processed.  This enables us to detect overlapping records for
	 * space allocations that cannot be shared.
	 */
	struct xfs_rmap_irec overlap_rec;

	/*
	 * The previous rmapbt record, so that we can check for two records
	 * that could be one.
	 */
	struct xfs_rmap_irec prev_rec;

	/* Bitmaps containing all blocks for each type of AG metadata. */
	struct xagb_bitmap fs_owned;
	struct xagb_bitmap log_owned;
	struct xagb_bitmap ag_owned;
	struct xagb_bitmap inobt_owned;
	struct xagb_bitmap refcbt_owned;

	/* Did we complete the AG space metadata bitmaps? */
	bool bitmaps_complete;
};

/* Cross-reference a rmap against the refcount btree. */
STATIC void
xchk_rmapbt_xref_refc(
	struct xfs_scrub *sc,
	struct xfs_rmap_irec *irec)
{
	xfs_agblock_t fbno;
	xfs_extlen_t flen;
	bool non_inode;
	bool is_bmbt;
	bool is_attr;
	bool is_unwritten;
	int error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
	is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;

	/* If this is shared, must be a data fork extent. */
	error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
			irec->rm_blockcount, &fbno, &flen, false);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_rmapbt_xref(
	struct xfs_scrub *sc,
	struct xfs_rmap_irec *irec)
{
	xfs_agblock_t agbno = irec->rm_startblock;
	xfs_extlen_t len = irec->rm_blockcount;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_INODES)
		xchk_xref_is_inode_chunk(sc, agbno, len);
	else
		xchk_xref_is_not_inode_chunk(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_COW)
		xchk_xref_is_cow_staging(sc, irec->rm_startblock,
				irec->rm_blockcount);
	else
		xchk_rmapbt_xref_refc(sc, irec);
}

/*
 * Check for bogus UNWRITTEN flags in the rmapbt node block keys.
 *
 * In reverse mapping records, the file mapping extent state
 * (XFS_RMAP_OFF_UNWRITTEN) is a record attribute, not a key field.  It is
 * not involved in lookups in any way.  In older kernels, the functions that
 * convert rmapbt records to keys forgot to filter out the extent state bit,
 * even though the key comparison functions have filtered the flag correctly.
 * If we spot an rmap key with the unwritten bit set in rm_offset, we should
 * mark the btree as needing optimization to rebuild the btree without those
 * flags.
 */
STATIC void
xchk_rmapbt_check_unwritten_in_keyflags(
	struct xchk_btree *bs)
{
	struct xfs_scrub *sc = bs->sc;
	struct xfs_btree_cur *cur = bs->cur;
	struct xfs_btree_block *keyblock;
	union xfs_btree_key *lkey, *hkey;
	__be64 badflag = cpu_to_be64(XFS_RMAP_OFF_UNWRITTEN);
	unsigned int level;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
		return;

	for (level = 1; level < cur->bc_nlevels; level++) {
		struct xfs_buf *bp;
		unsigned int ptr;

		/* Only check the first time we've seen this node block. */
		if (cur->bc_levels[level].ptr > 1)
			continue;

		keyblock = xfs_btree_get_block(cur, level, &bp);
		for (ptr = 1; ptr <= be16_to_cpu(keyblock->bb_numrecs); ptr++) {
			lkey = xfs_btree_key_addr(cur, ptr, keyblock);

			if (lkey->rmap.rm_offset & badflag) {
				xchk_btree_set_preen(sc, cur, level);
				break;
			}

			hkey = xfs_btree_high_key_addr(cur, ptr, keyblock);
			if (hkey->rmap.rm_offset & badflag) {
				xchk_btree_set_preen(sc, cur, level);
				break;
			}
		}
	}
}
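
/*
 * Hypothetical illustration of the keyflag problem described above: a node
 * block key derived from a record for an unwritten extent at file offset 8
 * could carry rm_offset 8 with XFS_RMAP_OFF_UNWRITTEN also set if the key
 * was written by an affected kernel.  Because the key comparison code masks
 * that flag, lookups still work correctly, which is why the btree is only
 * flagged for preening (an optimization/rebuild) rather than as corrupt.
 */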

static inline bool
xchk_rmapbt_is_shareable(
	struct xfs_scrub *sc,
	const struct xfs_rmap_irec *irec)
{
	if (!xfs_has_reflink(sc->mp))
		return false;
	if (XFS_RMAP_NON_INODE_OWNER(irec->rm_owner))
		return false;
	if (irec->rm_flags & (XFS_RMAP_BMBT_BLOCK | XFS_RMAP_ATTR_FORK |
			XFS_RMAP_UNWRITTEN))
		return false;
	return true;
}
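
/*
 * Illustrative example with made-up values: two adjacent records such as
 *
 *	{ rm_startblock = 100, rm_blockcount = 8, rm_owner = inode A }
 *	{ rm_startblock = 104, rm_blockcount = 8, rm_owner = inode B }
 *
 * overlap at blocks 104-107.  That is legal only if both mappings are
 * shareable in the sense of xchk_rmapbt_is_shareable() above, i.e. written
 * data fork extents on a reflink filesystem, because shared blocks are the
 * only way two owners can map the same space.  If either record were a
 * bmbt, attr fork, or unwritten mapping, the overlap checker below would
 * flag the btree as corrupt.
 */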

/* Flag failures for records that overlap but cannot. */
STATIC void
xchk_rmapbt_check_overlapping(
	struct xchk_btree *bs,
	struct xchk_rmap *cr,
	const struct xfs_rmap_irec *irec)
{
	xfs_agblock_t pnext, inext;

	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	/* No previous record? */
	if (cr->overlap_rec.rm_blockcount == 0)
		goto set_prev;

	/* Do overlap_rec and irec overlap? */
	pnext = cr->overlap_rec.rm_startblock + cr->overlap_rec.rm_blockcount;
	if (pnext <= irec->rm_startblock)
		goto set_prev;

	/* Overlap is only allowed if both records are data fork mappings. */
	if (!xchk_rmapbt_is_shareable(bs->sc, &cr->overlap_rec) ||
	    !xchk_rmapbt_is_shareable(bs->sc, irec))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	/* Save whichever rmap record extends furthest. */
	inext = irec->rm_startblock + irec->rm_blockcount;
	if (pnext > inext)
		return;

set_prev:
	memcpy(&cr->overlap_rec, irec, sizeof(struct xfs_rmap_irec));
}

/* Decide if two reverse-mapping records can be merged. */
static inline bool
xchk_rmap_mergeable(
	struct xchk_rmap *cr,
	const struct xfs_rmap_irec *r2)
{
	const struct xfs_rmap_irec *r1 = &cr->prev_rec;

	/* Ignore if prev_rec is not yet initialized. */
	if (cr->prev_rec.rm_blockcount == 0)
		return false;

	if (r1->rm_owner != r2->rm_owner)
		return false;
	if (r1->rm_startblock + r1->rm_blockcount != r2->rm_startblock)
		return false;
	if ((unsigned long long)r1->rm_blockcount + r2->rm_blockcount >
			XFS_RMAP_LEN_MAX)
		return false;
	if (XFS_RMAP_NON_INODE_OWNER(r2->rm_owner))
		return true;
	/* must be an inode owner below here */
	if (r1->rm_flags != r2->rm_flags)
		return false;
	if (r1->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return true;
	return r1->rm_offset + r1->rm_blockcount == r2->rm_offset;
}

/* Flag failures for records that could be merged. */
STATIC void
xchk_rmapbt_check_mergeable(
	struct xchk_btree *bs,
	struct xchk_rmap *cr,
	const struct xfs_rmap_irec *irec)
{
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	if (xchk_rmap_mergeable(cr, irec))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	memcpy(&cr->prev_rec, irec, sizeof(struct xfs_rmap_irec));
}

/* Compare an rmap for AG metadata against the metadata walk. */
STATIC int
xchk_rmapbt_mark_bitmap(
	struct xchk_btree *bs,
	struct xchk_rmap *cr,
	const struct xfs_rmap_irec *irec)
{
	struct xfs_scrub *sc = bs->sc;
	struct xagb_bitmap *bmp = NULL;
	xfs_extlen_t fsbcount = irec->rm_blockcount;

	/*
	 * Skip corrupt records.  It is essential that we detect records in
	 * the btree that cannot overlap but do, flag those as CORRUPT, and
	 * skip the bitmap comparison to avoid generating false XCORRUPT
	 * reports.
	 */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/*
	 * If the AG metadata walk didn't complete, there's no point in
	 * comparing against partial results.
	 */
	if (!cr->bitmaps_complete)
		return 0;

	switch (irec->rm_owner) {
	case XFS_RMAP_OWN_FS:
		bmp = &cr->fs_owned;
		break;
	case XFS_RMAP_OWN_LOG:
		bmp = &cr->log_owned;
		break;
	case XFS_RMAP_OWN_AG:
		bmp = &cr->ag_owned;
		break;
	case XFS_RMAP_OWN_INOBT:
		bmp = &cr->inobt_owned;
		break;
	case XFS_RMAP_OWN_REFC:
		bmp = &cr->refcbt_owned;
		break;
	}

	if (!bmp)
		return 0;

	if (xagb_bitmap_test(bmp, irec->rm_startblock, &fsbcount)) {
		/*
		 * The start of this reverse mapping corresponds to a set
		 * region in the bitmap.  If the mapping covers more area
		 * than the set region, then it covers space that wasn't
		 * found by the AG metadata walk.
		 */
		if (fsbcount < irec->rm_blockcount)
			xchk_btree_xref_set_corrupt(bs->sc,
					bs->sc->sa.rmap_cur, 0);
	} else {
		/*
		 * The start of this reverse mapping does not correspond to a
		 * completely set region in the bitmap.  The region wasn't
		 * fully set by walking the AG metadata, so this is a
		 * cross-referencing corruption.
		 */
		xchk_btree_xref_set_corrupt(bs->sc, bs->sc->sa.rmap_cur, 0);
	}

	/* Unset the region so that we can detect missing rmap records. */
	return xagb_bitmap_clear(bmp, irec->rm_startblock, irec->rm_blockcount);
}
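
/*
 * Worked example of the bitmap comparison above, using made-up numbers:
 * suppose the AG metadata walk set bits 10-15 of ag_owned because the bnobt
 * and cntbt occupy those blocks.  An OWN_AG rmap record for blocks 10-17
 * starts inside the set region but extends past it, so fsbcount comes back
 * as 6, which is less than rm_blockcount (8), and the record is flagged as
 * a cross-referencing problem.  A record for blocks 10-13 passes and clears
 * those bits; any bits still set after the btree walk are reported by
 * xchk_rmapbt_check_bitmaps() below.
 */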

/* Scrub an rmapbt record. */
STATIC int
xchk_rmapbt_rec(
	struct xchk_btree *bs,
	const union xfs_btree_rec *rec)
{
	struct xchk_rmap *cr = bs->private;
	struct xfs_rmap_irec irec;

	if (xfs_rmap_btrec_to_irec(rec, &irec) != NULL ||
	    xfs_rmap_check_irec(bs->cur, &irec) != NULL) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	xchk_rmapbt_check_unwritten_in_keyflags(bs);
	xchk_rmapbt_check_mergeable(bs, cr, &irec);
	xchk_rmapbt_check_overlapping(bs, cr, &irec);
	xchk_rmapbt_xref(bs->sc, &irec);

	return xchk_rmapbt_mark_bitmap(bs, cr, &irec);
}

/* Add an AGFL block to the rmap list. */
STATIC int
xchk_rmapbt_walk_agfl(
	struct xfs_mount *mp,
	xfs_agblock_t agbno,
	void *priv)
{
	struct xagb_bitmap *bitmap = priv;

	return xagb_bitmap_set(bitmap, agbno, 1);
}

/*
 * Set up bitmaps mapping all the AG metadata to compare with the rmapbt
 * records.
 *
 * Grab our own btree cursors here if the scrub setup function didn't give us
 * a btree cursor due to reports of poor health.  We need to find out if the
 * rmapbt disagrees with primary metadata btrees to tag the rmapbt as being
 * XCORRUPT.
 */
STATIC int
xchk_rmapbt_walk_ag_metadata(
	struct xfs_scrub *sc,
	struct xchk_rmap *cr)
{
	struct xfs_mount *mp = sc->mp;
	struct xfs_buf *agfl_bp;
	struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
	struct xfs_btree_cur *cur;
	int error;

	/* OWN_FS: AG headers */
	error = xagb_bitmap_set(&cr->fs_owned, XFS_SB_BLOCK(mp),
			XFS_AGFL_BLOCK(mp) - XFS_SB_BLOCK(mp) + 1);
	if (error)
		goto out;

	/* OWN_LOG: Internal log */
	if (xfs_ag_contains_log(mp, sc->sa.pag->pag_agno)) {
		error = xagb_bitmap_set(&cr->log_owned,
				XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart),
				mp->m_sb.sb_logblocks);
		if (error)
			goto out;
	}

	/* OWN_AG: bnobt, cntbt, rmapbt, and AGFL */
	cur = sc->sa.bno_cur;
	if (!cur)
		cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
				sc->sa.pag, XFS_BTNUM_BNO);
	error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur);
	if (cur != sc->sa.bno_cur)
		xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	cur = sc->sa.cnt_cur;
	if (!cur)
		cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
				sc->sa.pag, XFS_BTNUM_CNT);
	error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur);
	if (cur != sc->sa.cnt_cur)
		xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	error = xagb_bitmap_set_btblocks(&cr->ag_owned, sc->sa.rmap_cur);
	if (error)
		goto out;

	error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
	if (error)
		goto out;

	error = xfs_agfl_walk(sc->mp, agf, agfl_bp, xchk_rmapbt_walk_agfl,
			&cr->ag_owned);
	xfs_trans_brelse(sc->tp, agfl_bp);
	if (error)
		goto out;

	/* OWN_INOBT: inobt, finobt */
	cur = sc->sa.ino_cur;
	if (!cur)
		cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, sc->sa.agi_bp,
				XFS_BTNUM_INO);
	error = xagb_bitmap_set_btblocks(&cr->inobt_owned, cur);
	if (cur != sc->sa.ino_cur)
		xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (xfs_has_finobt(sc->mp)) {
		cur = sc->sa.fino_cur;
		if (!cur)
			cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp,
					sc->sa.agi_bp, XFS_BTNUM_FINO);
		error = xagb_bitmap_set_btblocks(&cr->inobt_owned, cur);
		if (cur != sc->sa.fino_cur)
			xfs_btree_del_cursor(cur, error);
		if (error)
			goto out;
	}

	/* OWN_REFC: refcountbt */
	if (xfs_has_reflink(sc->mp)) {
		cur = sc->sa.refc_cur;
		if (!cur)
			cur = xfs_refcountbt_init_cursor(sc->mp, sc->tp,
					sc->sa.agf_bp, sc->sa.pag);
		error = xagb_bitmap_set_btblocks(&cr->refcbt_owned, cur);
		if (cur != sc->sa.refc_cur)
			xfs_btree_del_cursor(cur, error);
		if (error)
			goto out;
	}

out:
	/*
	 * If there's an error, set XFAIL and disable the bitmap
	 * cross-referencing checks, but proceed with the scrub anyway.
	 */
	if (error)
		xchk_btree_xref_process_error(sc, sc->sa.rmap_cur,
				sc->sa.rmap_cur->bc_nlevels - 1, &error);
	else
		cr->bitmaps_complete = true;
	return 0;
}

/*
 * Check for set regions in the bitmaps; if there are any, the rmap records
 * do not describe all the AG metadata.
 */
STATIC void
xchk_rmapbt_check_bitmaps(
	struct xfs_scrub *sc,
	struct xchk_rmap *cr)
{
	struct xfs_btree_cur *cur = sc->sa.rmap_cur;
	unsigned int level;

	if (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
				XFS_SCRUB_OFLAG_XFAIL))
		return;
	if (!cur)
		return;
	level = cur->bc_nlevels - 1;

	/*
	 * Any bitmap with bits still set indicates that the reverse mapping
	 * doesn't cover the entire primary structure.
	 */
	if (xagb_bitmap_hweight(&cr->fs_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);

	if (xagb_bitmap_hweight(&cr->log_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);

	if (xagb_bitmap_hweight(&cr->ag_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);

	if (xagb_bitmap_hweight(&cr->inobt_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);

	if (xagb_bitmap_hweight(&cr->refcbt_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);
}

/* Scrub the rmap btree for some AG. */
int
xchk_rmapbt(
	struct xfs_scrub *sc)
{
	struct xchk_rmap *cr;
	int error;

	cr = kzalloc(sizeof(struct xchk_rmap), XCHK_GFP_FLAGS);
	if (!cr)
		return -ENOMEM;

	xagb_bitmap_init(&cr->fs_owned);
	xagb_bitmap_init(&cr->log_owned);
	xagb_bitmap_init(&cr->ag_owned);
	xagb_bitmap_init(&cr->inobt_owned);
	xagb_bitmap_init(&cr->refcbt_owned);

	error = xchk_rmapbt_walk_ag_metadata(sc, cr);
	if (error)
		goto out;

	error = xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec,
			&XFS_RMAP_OINFO_AG, cr);
	if (error)
		goto out;

	xchk_rmapbt_check_bitmaps(sc, cr);

out:
	xagb_bitmap_destroy(&cr->refcbt_owned);
	xagb_bitmap_destroy(&cr->inobt_owned);
	xagb_bitmap_destroy(&cr->ag_owned);
	xagb_bitmap_destroy(&cr->log_owned);
	xagb_bitmap_destroy(&cr->fs_owned);
	kfree(cr);
	return error;
}

/* xref check that the extent is owned only by a given owner */
void
xchk_xref_is_only_owned_by(
	struct xfs_scrub *sc,
	xfs_agblock_t bno,
	xfs_extlen_t len,
	const struct xfs_owner_info *oinfo)
{
	struct xfs_rmap_matches res;
	int error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_count_owners(sc->sa.rmap_cur, bno, len, oinfo, &res);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (res.matches != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	if (res.bad_non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	if (res.non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* xref check that the extent is not owned by a given owner */
void
xchk_xref_is_not_owned_by(
	struct xfs_scrub *sc,
	xfs_agblock_t bno,
	xfs_extlen_t len,
	const struct xfs_owner_info *oinfo)
{
	struct xfs_rmap_matches res;
	int error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_count_owners(sc->sa.rmap_cur, bno, len, oinfo, &res);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (res.matches != 0)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	if (res.bad_non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* xref check that the extent has no reverse mapping at all */
void
xchk_xref_has_no_owner(
	struct xfs_scrub *sc,
	xfs_agblock_t bno,
	xfs_extlen_t len)
{
	enum xbtree_recpacking outcome;
	int error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_records(sc->sa.rmap_cur, bno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
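
/*
 * Usage note (illustrative, not an exhaustive list of callers): other
 * scrubbers use the three helpers above to check their own view of space
 * ownership against the rmapbt.  For example, a free space scrubber might
 * call xchk_xref_has_no_owner() on an extent that the bnobt claims is free,
 * while an AG header scrubber might call
 *
 *	xchk_xref_is_only_owned_by(sc, XFS_SB_BLOCK(mp), 1,
 *			&XFS_RMAP_OINFO_FS);
 *
 * to assert that the superblock is mapped only by an OWN_FS record.
 */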