// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "xfs_ag.h"

/* Set us up with an inode's bmap. */
int
xchk_setup_inode_bmap(
	struct xfs_scrub *sc)
{
	int error;

	error = xchk_get_inode(sc);
	if (error)
		goto out;

	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);

	/*
	 * We don't want any ephemeral data fork updates sitting around
	 * while we inspect block mappings, so wait for directio to finish
	 * and flush dirty data if we have delalloc reservations.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
	    sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
		struct address_space *mapping = VFS_I(sc->ip)->i_mapping;

		inode_dio_wait(VFS_I(sc->ip));

		/*
		 * Try to flush all incore state to disk before we examine the
		 * space mappings for the data fork. Leave accumulated errors
		 * in the mapping for the writer threads to consume.
		 *
		 * On ENOSPC or EIO writeback errors, we continue into the
		 * extent mapping checks because write failures do not
		 * necessarily imply anything about the correctness of the file
		 * metadata. The metadata and the file data could be on
		 * completely separate devices; a media failure might only
		 * affect a subset of the disk, etc. We can handle delalloc
		 * extents in the scrubber, so leaving them in memory is fine.
		 */
		error = filemap_fdatawrite(mapping);
		if (!error)
			error = filemap_fdatawait_keep_errors(mapping);
		if (error && (error != -ENOSPC && error != -EIO))
			goto out;
	}

	/* Got the inode, lock it and we're ready to go. */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode */
	return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

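/*
 * State tracked across all the mappings of a single fork. @lastoff is
 * the file offset just past the previous mapping, so any record that
 * starts before it is out of order or overlapping; @was_loaded remembers
 * whether the incore extent tree was already populated before we walked
 * the on-disk bmbt.
 */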
struct xchk_bmap_info {
	struct xfs_scrub *sc;
	xfs_fileoff_t lastoff;
	bool is_rt;
	bool is_shared;
	bool was_loaded;
	int whichfork;
};

/* Look for a corresponding rmap for this irec. */
static inline bool
xchk_bmap_get_rmap(
	struct xchk_bmap_info *info,
	struct xfs_bmbt_irec *irec,
	xfs_agblock_t agbno,
	uint64_t owner,
	struct xfs_rmap_irec *rmap)
{
	xfs_fileoff_t offset;
	unsigned int rflags = 0;
	int has_rmap;
	int error;

	if (info->whichfork == XFS_ATTR_FORK)
		rflags |= XFS_RMAP_ATTR_FORK;
	if (irec->br_state == XFS_EXT_UNWRITTEN)
		rflags |= XFS_RMAP_UNWRITTEN;

	/*
	 * CoW staging extents are owned (on disk) by the refcountbt, so
	 * their rmaps do not have offsets.
	 */
	if (info->whichfork == XFS_COW_FORK)
		offset = 0;
	else
		offset = irec->br_startoff;

	/*
	 * If the caller thinks this could be a shared bmbt extent (IOWs,
	 * any data fork extent of a reflink inode) then we have to use the
	 * range rmap lookup to make sure we get the correct owner/offset.
	 */
	if (info->is_shared) {
		error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
				owner, offset, rflags, rmap, &has_rmap);
		if (!xchk_should_check_xref(info->sc, &error,
				&info->sc->sa.rmap_cur))
			return false;
		goto out;
	}

	/*
	 * Otherwise, use the (faster) regular lookup.
	 */
	error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
			offset, rflags, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;
	if (!has_rmap)
		goto out;

	error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;

out:
	if (!has_rmap)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	return has_rmap;
}

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xchk_bmap_xref_rmap(
	struct xchk_bmap_info *info,
	struct xfs_bmbt_irec *irec,
	xfs_agblock_t agbno)
{
	struct xfs_rmap_irec rmap;
	unsigned long long rmap_end;
	uint64_t owner;

	if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
		return;

	if (info->whichfork == XFS_COW_FORK)
		owner = XFS_RMAP_OWN_COW;
	else
		owner = info->sc->ip->i_ino;

	/* Find the rmap record for this irec. */
	if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
		return;

	/* Check the rmap. */
	rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
	if (rmap.rm_startblock > agbno ||
	    agbno + irec->br_blockcount > rmap_end)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check the logical offsets if applicable. CoW staging extents
	 * don't track logical offsets since the mappings only exist in
	 * memory.
	 */
	if (info->whichfork != XFS_COW_FORK) {
		rmap_end = (unsigned long long)rmap.rm_offset +
				rmap.rm_blockcount;
		if (rmap.rm_offset > irec->br_startoff ||
		    irec->br_startoff + irec->br_blockcount > rmap_end)
			xchk_fblock_xref_set_corrupt(info->sc,
					info->whichfork, irec->br_startoff);
	}

	if (rmap.rm_owner != owner)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for discrepancies between the unwritten flag in the irec and
	 * the rmap. Note that the (in-memory) CoW fork distinguishes between
	 * unwritten and written extents, but we don't track that in the rmap
	 * records because the blocks are owned (on-disk) by the refcountbt,
	 * which doesn't track unwritten state.
	 */
	if (owner != XFS_RMAP_OWN_COW &&
	    !!(irec->br_state == XFS_EXT_UNWRITTEN) !=
	    !!(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

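	/*
	 * The rmap must agree with us about which fork this mapping belongs
	 * to, and none of the mappings we check here should be tagged as
	 * bmbt blocks.
	 */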
	if (!!(info->whichfork == XFS_ATTR_FORK) !=
	    !!(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
}

/* Cross-reference a single rtdev extent record. */
STATIC void
xchk_bmap_rt_iextent_xref(
	struct xfs_inode *ip,
	struct xchk_bmap_info *info,
	struct xfs_bmbt_irec *irec)
{
	xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
			irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xchk_bmap_iextent_xref(
	struct xfs_inode *ip,
	struct xchk_bmap_info *info,
	struct xfs_bmbt_irec *irec)
{
	struct xfs_mount *mp = info->sc->mp;
	xfs_agnumber_t agno;
	xfs_agblock_t agbno;
	xfs_extlen_t len;
	int error;

	agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
	len = irec->br_blockcount;

	error = xchk_ag_init(info->sc, agno, &info->sc->sa);
	if (!xchk_fblock_process_error(info->sc, info->whichfork,
			irec->br_startoff, &error))
		return;

	xchk_xref_is_used_space(info->sc, agbno, len);
	xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
	xchk_bmap_xref_rmap(info, irec, agbno);

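	/*
	 * Only the data fork of a reflink file is allowed to share blocks,
	 * so cross-reference everything else against the refcount btree to
	 * make sure it is not marked shared. CoW fork mappings must be
	 * marked as staging extents.
	 */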
	switch (info->whichfork) {
	case XFS_DATA_FORK:
		if (xfs_is_reflink_inode(info->sc->ip))
			break;
		fallthrough;
	case XFS_ATTR_FORK:
		xchk_xref_is_not_shared(info->sc, agbno,
				irec->br_blockcount);
		break;
	case XFS_COW_FORK:
		xchk_xref_is_cow_staging(info->sc, agbno,
				irec->br_blockcount);
		break;
	}

	xchk_ag_free(info->sc, &info->sc->sa);
}

/*
 * Directories and attr forks should never have blocks that can't be addressed
 * by a xfs_dablk_t.
 */
STATIC void
xchk_bmap_dirattr_extent(
	struct xfs_inode *ip,
	struct xchk_bmap_info *info,
	struct xfs_bmbt_irec *irec)
{
	struct xfs_mount *mp = ip->i_mount;
	xfs_fileoff_t off;

	if (!S_ISDIR(VFS_I(ip)->i_mode) && info->whichfork != XFS_ATTR_FORK)
		return;

	if (!xfs_verify_dablk(mp, irec->br_startoff))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	off = irec->br_startoff + irec->br_blockcount - 1;
	if (!xfs_verify_dablk(mp, off))
		xchk_fblock_set_corrupt(info->sc, info->whichfork, off);
}

/* Scrub a single extent record. */
STATIC int
xchk_bmap_iextent(
	struct xfs_inode *ip,
	struct xchk_bmap_info *info,
	struct xfs_bmbt_irec *irec)
{
	struct xfs_mount *mp = info->sc->mp;
	int error = 0;

	/*
	 * Check for out-of-order extents. This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	xchk_bmap_dirattr_extent(ip, info, irec);

	/* There should never be a "hole" extent in either extent list. */
	if (irec->br_startblock == HOLESTARTBLOCK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for delalloc extents. We never iterate the ones in the
	 * in-core extent scan, and we should never see these in the bmbt.
	 */
	if (isnullstartblock(irec->br_startblock))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
	if (irec->br_blockcount > MAXEXTLEN)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (info->is_rt &&
	    !xfs_verify_rtext(mp, irec->br_startblock, irec->br_blockcount))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (!info->is_rt &&
	    !xfs_verify_fsbext(mp, irec->br_startblock, irec->br_blockcount))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* We don't allow unwritten extents on attr forks. */
	if (irec->br_state == XFS_EXT_UNWRITTEN &&
	    info->whichfork == XFS_ATTR_FORK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	if (info->is_rt)
		xchk_bmap_rt_iextent_xref(ip, info, irec);
	else
		xchk_bmap_iextent_xref(ip, info, irec);

	info->lastoff = irec->br_startoff + irec->br_blockcount;
	return error;
}

/* Scrub a bmbt record. */
STATIC int
xchk_bmapbt_rec(
	struct xchk_btree *bs,
	union xfs_btree_rec *rec)
{
	struct xfs_bmbt_irec irec;
	struct xfs_bmbt_irec iext_irec;
	struct xfs_iext_cursor icur;
	struct xchk_bmap_info *info = bs->private;
	struct xfs_inode *ip = bs->cur->bc_ino.ip;
	struct xfs_buf *bp = NULL;
	struct xfs_btree_block *block;
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, info->whichfork);
	uint64_t owner;
	int i;

	/*
	 * Check the owners of the btree blocks up to the level below
	 * the root since the verifiers don't do that.
	 */
	if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
	    bs->cur->bc_ptrs[0] == 1) {
		for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
			block = xfs_btree_get_block(bs->cur, i, &bp);
			owner = be64_to_cpu(block->bb_u.l.bb_owner);
			if (owner != ip->i_ino)
				xchk_fblock_set_corrupt(bs->sc,
						info->whichfork, 0);
		}
	}

	/*
	 * Check that the incore extent tree contains an extent that matches
	 * this one exactly. We validate those cached bmaps later, so we don't
	 * need to check them here. If the incore extent tree was just loaded
	 * from disk by the scrubber, we assume that its contents match what's
	 * on disk (we still hold the ILOCK) and skip the equivalence check.
	 */
	if (!info->was_loaded)
		return 0;

	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	if (!xfs_iext_lookup_extent(ip, ifp, irec.br_startoff, &icur,
				&iext_irec) ||
	    irec.br_startoff != iext_irec.br_startoff ||
	    irec.br_startblock != iext_irec.br_startblock ||
	    irec.br_blockcount != iext_irec.br_blockcount ||
	    irec.br_state != iext_irec.br_state)
		xchk_fblock_set_corrupt(bs->sc, info->whichfork,
				irec.br_startoff);
	return 0;
}

/* Scan the btree records. */
STATIC int
xchk_bmap_btree(
	struct xfs_scrub *sc,
	int whichfork,
	struct xchk_bmap_info *info)
{
	struct xfs_owner_info oinfo;
	struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
	struct xfs_mount *mp = sc->mp;
	struct xfs_inode *ip = sc->ip;
	struct xfs_btree_cur *cur;
	int error;

	/* Load the incore bmap cache if it's not loaded. */
	info->was_loaded = !xfs_need_iread_extents(ifp);

	error = xfs_iread_extents(sc->tp, ip, whichfork);
	if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Check the btree structure. */
	cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
	xfs_btree_del_cursor(cur, error);
out:
	return error;
}

struct xchk_bmap_check_rmap_info {
	struct xfs_scrub *sc;
	int whichfork;
	struct xfs_iext_cursor icur;
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xchk_bmap_check_rmap(
	struct xfs_btree_cur *cur,
	struct xfs_rmap_irec *rec,
	void *priv)
{
	struct xfs_bmbt_irec irec;
	struct xchk_bmap_check_rmap_info *sbcri = priv;
	struct xfs_ifork *ifp;
	struct xfs_scrub *sc = sbcri->sc;
	bool have_map;

	/* Is this even the right fork? */
	if (rec->rm_owner != sc->ip->i_ino)
		return 0;
	if ((sbcri->whichfork == XFS_ATTR_FORK) ^
	    !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;
	if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return 0;

	/* Now look up the bmbt record. */
	ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
	if (!ifp) {
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
		goto out;
	}
	have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
			&sbcri->icur, &irec);
	if (!have_map)
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
	/*
	 * bmap extent record lengths are constrained to 2^21 blocks in length
	 * because of space constraints in the on-disk metadata structure.
	 * However, rmap extent record lengths are constrained only by AG
	 * length, so we have to loop through the bmbt to make sure that the
	 * entire rmap is covered by bmbt records.
	 */
	while (have_map) {
		if (irec.br_startoff != rec->rm_offset)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
				cur->bc_ag.pag->pag_agno, rec->rm_startblock))
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_blockcount > rec->rm_blockcount)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
		rec->rm_startblock += irec.br_blockcount;
		rec->rm_offset += irec.br_blockcount;
		rec->rm_blockcount -= irec.br_blockcount;
		if (rec->rm_blockcount == 0)
			break;
		have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
		if (!have_map)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
	}

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;
	return 0;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_ag_rmaps(
	struct xfs_scrub *sc,
	int whichfork,
	struct xfs_perag *pag)
{
	struct xchk_bmap_check_rmap_info sbcri;
	struct xfs_btree_cur *cur;
	struct xfs_buf *agf;
	int error;

	error = xfs_alloc_read_agf(sc->mp, sc->tp, pag->pag_agno, 0, &agf);
	if (error)
		return error;

	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, pag);

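	/*
	 * Walk every rmap in this AG and complain about any that claim to
	 * belong to this file but have no matching mapping in the incore
	 * extent tree. A return value of -ECANCELED just means the walk
	 * stopped early after flagging a corruption; it is not a scan
	 * failure.
	 */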
	sbcri.sc = sc;
	sbcri.whichfork = whichfork;
	error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
	if (error == -ECANCELED)
		error = 0;

	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(sc->tp, agf);
	return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_rmaps(
	struct xfs_scrub *sc,
	int whichfork)
{
	struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
	struct xfs_perag *pag;
	xfs_agnumber_t agno;
	bool zero_size;
	int error;

	if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
	    whichfork == XFS_COW_FORK ||
	    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return 0;

	/* Don't support realtime rmap checks yet. */
	if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
		return 0;

	ASSERT(XFS_IFORK_PTR(sc->ip, whichfork) != NULL);

	/*
	 * Only do this for complex maps that are in btree format, or for
	 * situations where we would seem to have a size but zero extents.
	 * The inode repair code can zap broken iforks, which means we have
	 * to flag this bmap as corrupt if there are rmaps that need to be
	 * reattached.
	 */

	if (whichfork == XFS_DATA_FORK)
		zero_size = i_size_read(VFS_I(sc->ip)) == 0;
	else
		zero_size = false;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE &&
	    (zero_size || ifp->if_nextents > 0))
		return 0;

	for_each_perag(sc->mp, agno, pag) {
		error = xchk_bmap_check_ag_rmaps(sc, whichfork, pag);
		if (error)
			break;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
	}
	if (pag)
		xfs_perag_put(pag);
	return error;
}

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xchk_bmap(
	struct xfs_scrub *sc,
	int whichfork)
{
	struct xfs_bmbt_irec irec;
	struct xchk_bmap_info info = { NULL };
	struct xfs_mount *mp = sc->mp;
	struct xfs_inode *ip = sc->ip;
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t endoff;
	struct xfs_iext_cursor icur;
	int error = 0;

	/* Non-existent forks can be ignored. */
	if (!ifp)
		goto out;

	info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
	info.whichfork = whichfork;
	info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
	info.sc = sc;

	switch (whichfork) {
	case XFS_COW_FORK:
		/* No CoW forks on non-reflink inodes/filesystems. */
		if (!xfs_is_reflink_inode(ip)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			goto out;
		}
		break;
	case XFS_ATTR_FORK:
		if (!xfs_sb_version_hasattr(&mp->m_sb) &&
		    !xfs_sb_version_hasattr2(&mp->m_sb))
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		break;
	default:
		ASSERT(whichfork == XFS_DATA_FORK);
		break;
	}

	/* Check the fork values */
	switch (ifp->if_format) {
	case XFS_DINODE_FMT_UUID:
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
		/* No mappings to check. */
		goto out;
	case XFS_DINODE_FMT_EXTENTS:
		break;
	case XFS_DINODE_FMT_BTREE:
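		/*
		 * The CoW fork only ever exists in memory, in extent list
		 * format, so a btree-format CoW fork makes no sense.
		 */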
		if (whichfork == XFS_COW_FORK) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}

		error = xchk_bmap_btree(sc, whichfork, &info);
		if (error)
			goto out;
		break;
	default:
		xchk_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Find the offset of the last extent in the mapping. */
	error = xfs_bmap_last_offset(ip, &endoff, whichfork);
	if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Scrub extent records. */
	info.lastoff = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error) ||
		    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			goto out;
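		/* Delalloc reservations have no on-disk blocks to check. */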
		if (isnullstartblock(irec.br_startblock))
			continue;
		if (irec.br_startoff >= endoff) {
			xchk_fblock_set_corrupt(sc, whichfork,
					irec.br_startoff);
			goto out;
		}
		error = xchk_bmap_iextent(ip, &info, &irec);
		if (error)
			goto out;
	}

	error = xchk_bmap_check_rmaps(sc, whichfork);
	if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
		goto out;
out:
	return error;
}

/* Scrub an inode's data fork. */
int
xchk_bmap_data(
	struct xfs_scrub *sc)
{
	return xchk_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xchk_bmap_attr(
	struct xfs_scrub *sc)
{
	return xchk_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xchk_bmap_cow(
	struct xfs_scrub *sc)
{
	if (!xfs_is_reflink_inode(sc->ip))
		return -ENOENT;

	return xchk_bmap(sc, XFS_COW_FORK);
}