// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_inode_fork.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/* Set us up with an inode's bmap. */
int
xchk_setup_inode_bmap(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	int			error;

	error = xchk_get_inode(sc, ip);
	if (error)
		goto out;

	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);

	/*
	 * We don't want any ephemeral data fork updates sitting around
	 * while we inspect block mappings, so wait for directio to finish
	 * and flush dirty data if we have delalloc reservations.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
	    sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
		inode_dio_wait(VFS_I(sc->ip));
		error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
		if (error)
			goto out;
	}

	/* Got the inode, lock it and we're ready to go. */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode */
	return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

struct xchk_bmap_info {
	struct xfs_scrub	*sc;
	xfs_fileoff_t		lastoff;
	bool			is_rt;
	bool			is_shared;
	int			whichfork;
};

/* Look for a corresponding rmap for this irec. */
static inline bool
xchk_bmap_get_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno,
	uint64_t		owner,
	struct xfs_rmap_irec	*rmap)
{
	xfs_fileoff_t		offset;
	unsigned int		rflags = 0;
	int			has_rmap;
	int			error;

	if (info->whichfork == XFS_ATTR_FORK)
		rflags |= XFS_RMAP_ATTR_FORK;

	/*
	 * CoW staging extents are owned (on disk) by the refcountbt, so
	 * their rmaps do not have offsets.
	 */
	if (info->whichfork == XFS_COW_FORK)
		offset = 0;
	else
		offset = irec->br_startoff;

	/*
	 * If the caller thinks this could be a shared bmbt extent (IOWs,
	 * any data fork extent of a reflink inode) then we have to use the
	 * range rmap lookup to make sure we get the correct owner/offset.
	 */
	if (info->is_shared) {
		error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
				owner, offset, rflags, rmap, &has_rmap);
		if (!xchk_should_check_xref(info->sc, &error,
				&info->sc->sa.rmap_cur))
			return false;
		goto out;
	}

	/*
	 * Otherwise, use the (faster) regular lookup.
	 */
	error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
			offset, rflags, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;
	if (!has_rmap)
		goto out;

	error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;

out:
	if (!has_rmap)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	return has_rmap;
}

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xchk_bmap_xref_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno)
{
	struct xfs_rmap_irec	rmap;
	unsigned long long	rmap_end;
	uint64_t		owner;

	if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
		return;

	if (info->whichfork == XFS_COW_FORK)
		owner = XFS_RMAP_OWN_COW;
	else
		owner = info->sc->ip->i_ino;

	/* Find the rmap record for this irec. */
	if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
		return;

	/* Check the rmap. */
	rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
	if (rmap.rm_startblock > agbno ||
	    agbno + irec->br_blockcount > rmap_end)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check the logical offsets if applicable. CoW staging extents
	 * don't track logical offsets since the mappings only exist in
	 * memory.
	 */
	if (info->whichfork != XFS_COW_FORK) {
		rmap_end = (unsigned long long)rmap.rm_offset +
				rmap.rm_blockcount;
		if (rmap.rm_offset > irec->br_startoff ||
		    irec->br_startoff + irec->br_blockcount > rmap_end)
			xchk_fblock_xref_set_corrupt(info->sc,
					info->whichfork, irec->br_startoff);
	}

	if (rmap.rm_owner != owner)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for discrepancies between the unwritten flag in the irec and
	 * the rmap. Note that the (in-memory) CoW fork distinguishes between
	 * unwritten and written extents, but we don't track that in the rmap
	 * records because the blocks are owned (on-disk) by the refcountbt,
	 * which doesn't track unwritten state.
	 */
	if (owner != XFS_RMAP_OWN_COW &&
	    irec->br_state == XFS_EXT_UNWRITTEN &&
	    !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->whichfork == XFS_ATTR_FORK &&
	    !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
}

/* Cross-reference a single rtdev extent record. */
STATIC void
xchk_bmap_rt_extent_xref(
	struct xchk_bmap_info	*info,
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
			irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xchk_bmap_extent_xref(
	struct xchk_bmap_info	*info,
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		len;
	int			error;

	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
	len = irec->br_blockcount;

	error = xchk_ag_init(info->sc, agno, &info->sc->sa);
	if (!xchk_fblock_process_error(info->sc, info->whichfork,
			irec->br_startoff, &error))
		return;

	xchk_xref_is_used_space(info->sc, agbno, len);
	xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
	xchk_bmap_xref_rmap(info, irec, agbno);
	switch (info->whichfork) {
	case XFS_DATA_FORK:
		if (xfs_is_reflink_inode(info->sc->ip))
			break;
		/* fall through */
	case XFS_ATTR_FORK:
		xchk_xref_is_not_shared(info->sc, agbno,
				irec->br_blockcount);
		break;
	case XFS_COW_FORK:
		xchk_xref_is_cow_staging(info->sc, agbno,
				irec->br_blockcount);
		break;
	}

	xchk_ag_free(info->sc, &info->sc->sa);
}

/*
 * Directories and attr forks should never have blocks that can't be addressed
 * by a xfs_dablk_t.
 */
STATIC void
xchk_bmap_dirattr_extent(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		off;

	if (!S_ISDIR(VFS_I(ip)->i_mode) && info->whichfork != XFS_ATTR_FORK)
		return;

	if (!xfs_verify_dablk(mp, irec->br_startoff))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

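	/* The last mapped block must be addressable as a dablk, too. */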
	off = irec->br_startoff + irec->br_blockcount - 1;
	if (!xfs_verify_dablk(mp, off))
		xchk_fblock_set_corrupt(info->sc, info->whichfork, off);
}

/* Scrub a single extent record. */
STATIC int
xchk_bmap_extent(
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	struct xfs_buf		*bp = NULL;
	xfs_filblks_t		end;
	int			error = 0;

	if (cur)
		xfs_btree_get_block(cur, 0, &bp);

	/*
	 * Check for out-of-order extents. This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	xchk_bmap_dirattr_extent(ip, info, irec);

	/* There should never be a "hole" extent in either extent list. */
	if (irec->br_startblock == HOLESTARTBLOCK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for delalloc extents. We never iterate the ones in the
	 * in-core extent scan, and we should never see these in the bmbt.
	 */
	if (isnullstartblock(irec->br_startblock))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
	if (irec->br_blockcount > MAXEXTLEN)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	end = irec->br_startblock + irec->br_blockcount - 1;
	if (info->is_rt &&
	    (!xfs_verify_rtbno(mp, irec->br_startblock) ||
	     !xfs_verify_rtbno(mp, end)))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (!info->is_rt &&
	    (!xfs_verify_fsbno(mp, irec->br_startblock) ||
	     !xfs_verify_fsbno(mp, end) ||
	     XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
				XFS_FSB_TO_AGNO(mp, end)))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* We don't allow unwritten extents on attr forks. */
	if (irec->br_state == XFS_EXT_UNWRITTEN &&
	    info->whichfork == XFS_ATTR_FORK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->is_rt)
		xchk_bmap_rt_extent_xref(info, ip, cur, irec);
	else
		xchk_bmap_extent_xref(info, ip, cur, irec);

	info->lastoff = irec->br_startoff + irec->br_blockcount;
	return error;
}

/* Scrub a bmbt record. */
STATIC int
xchk_bmapbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_bmbt_irec	irec;
	struct xchk_bmap_info	*info = bs->private;
	struct xfs_inode	*ip = bs->cur->bc_private.b.ip;
	struct xfs_buf		*bp = NULL;
	struct xfs_btree_block	*block;
	uint64_t		owner;
	int			i;

	/*
	 * Check the owners of the btree blocks up to the level below
	 * the root since the verifiers don't do that.
	 */
	if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
	    bs->cur->bc_ptrs[0] == 1) {
		for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
			block = xfs_btree_get_block(bs->cur, i, &bp);
			owner = be64_to_cpu(block->bb_u.l.bb_owner);
			if (owner != ip->i_ino)
				xchk_fblock_set_corrupt(bs->sc,
						info->whichfork, 0);
		}
	}

	/* Set up the in-core record and scrub it. */
	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	return xchk_bmap_extent(ip, bs->cur, info, &irec);
}

/* Scan the btree records. */
STATIC int
xchk_bmap_btree(
	struct xfs_scrub	*sc,
	int			whichfork,
	struct xchk_bmap_info	*info)
{
	struct xfs_owner_info	oinfo;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_btree_cur	*cur;
	int			error;

	cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
	xfs_btree_del_cursor(cur, error);
	return error;
}

struct xchk_bmap_check_rmap_info {
	struct xfs_scrub	*sc;
	int			whichfork;
	struct xfs_iext_cursor	icur;
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xchk_bmap_check_rmap(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_bmbt_irec		irec;
	struct xchk_bmap_check_rmap_info	*sbcri = priv;
	struct xfs_ifork		*ifp;
	struct xfs_scrub		*sc = sbcri->sc;
	bool				have_map;

	/* Is this even the right fork? */
	if (rec->rm_owner != sc->ip->i_ino)
		return 0;
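	/*
	 * The rmap must describe the fork we're checking, so the
	 * XFS_RMAP_ATTR_FORK flag has to match whichfork exactly.
	 */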
	if ((sbcri->whichfork == XFS_ATTR_FORK) ^
	    !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;
	if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return 0;

	/* Now look up the bmbt record. */
	ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
	if (!ifp) {
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
		goto out;
	}
	have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
			&sbcri->icur, &irec);
	if (!have_map)
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
	/*
	 * bmap extent record lengths are constrained to 2^21 blocks in length
	 * because of space constraints in the on-disk metadata structure.
	 * However, rmap extent record lengths are constrained only by AG
	 * length, so we have to loop through the bmbt to make sure that the
	 * entire rmap is covered by bmbt records.
	 */
	while (have_map) {
		if (irec.br_startoff != rec->rm_offset)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
				cur->bc_private.a.agno, rec->rm_startblock))
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_blockcount > rec->rm_blockcount)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
		rec->rm_startblock += irec.br_blockcount;
		rec->rm_offset += irec.br_blockcount;
		rec->rm_blockcount -= irec.br_blockcount;
		if (rec->rm_blockcount == 0)
			break;
		have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
		if (!have_map)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
	}

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return XFS_BTREE_QUERY_RANGE_ABORT;
	return 0;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_ag_rmaps(
	struct xfs_scrub		*sc,
	int				whichfork,
	xfs_agnumber_t			agno)
{
	struct xchk_bmap_check_rmap_info	sbcri;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agf;
	int				error;

	error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
	if (error)
		return error;

	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
	if (!cur) {
		error = -ENOMEM;
		goto out_agf;
	}

	sbcri.sc = sc;
	sbcri.whichfork = whichfork;
	error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT)
		error = 0;

	xfs_btree_del_cursor(cur, error);
out_agf:
	xfs_trans_brelse(sc->tp, agf);
	return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_rmaps(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	loff_t			size;
	xfs_agnumber_t		agno;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
	    whichfork == XFS_COW_FORK ||
	    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return 0;

	/* Don't support realtime rmap checks yet. */
	if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
		return 0;

	/*
	 * Only do this for complex maps that are in btree format, or for
	 * situations where we would seem to have a size but zero extents.
	 * The inode repair code can zap broken iforks, which means we have
	 * to flag this bmap as corrupt if there are rmaps that need to be
	 * reattached.
	 */
	switch (whichfork) {
	case XFS_DATA_FORK:
		size = i_size_read(VFS_I(sc->ip));
		break;
	case XFS_ATTR_FORK:
		size = XFS_IFORK_Q(sc->ip);
		break;
	default:
		size = 0;
		break;
	}
	if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    (size == 0 || XFS_IFORK_NEXTENTS(sc->ip, whichfork) > 0))
		return 0;

	for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
		error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
	}

	return 0;
}

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xchk_bmap(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	struct xfs_bmbt_irec	irec;
	struct xchk_bmap_info	info = { NULL };
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		endoff;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
	info.whichfork = whichfork;
	info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
	info.sc = sc;

	switch (whichfork) {
	case XFS_COW_FORK:
		/* Non-existent CoW forks are ignorable. */
		if (!ifp)
			goto out;
		/* No CoW forks on non-reflink inodes/filesystems. */
		if (!xfs_is_reflink_inode(ip)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			goto out;
		}
		break;
	case XFS_ATTR_FORK:
		if (!ifp)
			goto out_check_rmap;
		if (!xfs_sb_version_hasattr(&mp->m_sb) &&
		    !xfs_sb_version_hasattr2(&mp->m_sb))
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		break;
	default:
		ASSERT(whichfork == XFS_DATA_FORK);
		break;
	}

	/* Check the fork values */
	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_UUID:
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
		/* No mappings to check. */
		goto out;
	case XFS_DINODE_FMT_EXTENTS:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_COW_FORK) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}

		error = xchk_bmap_btree(sc, whichfork, &info);
		if (error)
			goto out;
		break;
	default:
		xchk_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Now try to scrub the in-memory extent list. */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(sc->tp, ip, whichfork);
		if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
			goto out;
	}

	/* Find the offset of the last extent in the mapping. */
	error = xfs_bmap_last_offset(ip, &endoff, whichfork);
	if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Scrub extent records. */
	info.lastoff = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error) ||
		    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			break;
		if (isnullstartblock(irec.br_startblock))
			continue;
		if (irec.br_startoff >= endoff) {
			xchk_fblock_set_corrupt(sc, whichfork,
					irec.br_startoff);
			goto out;
		}
		error = xchk_bmap_extent(ip, NULL, &info, &irec);
		if (error)
			goto out;
	}

out_check_rmap:
	error = xchk_bmap_check_rmaps(sc, whichfork);
	if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
		goto out;
out:
	return error;
}

/* Scrub an inode's data fork. */
int
xchk_bmap_data(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xchk_bmap_attr(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xchk_bmap_cow(
	struct xfs_scrub	*sc)
{
	if (!xfs_is_reflink_inode(sc->ip))
		return -ENOENT;

	return xchk_bmap(sc, XFS_COW_FORK);
}