// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"

/* Set us up with an inode's bmap. */
int
xchk_setup_inode_bmap(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	int			error;

	error = xchk_get_inode(sc, ip);
	if (error)
		goto out;

	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);

	/*
	 * We don't want any ephemeral data fork updates sitting around
	 * while we inspect block mappings, so wait for directio to finish
	 * and flush dirty data if we have delalloc reservations.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
	    sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
		struct address_space	*mapping = VFS_I(sc->ip)->i_mapping;

		inode_dio_wait(VFS_I(sc->ip));

		/*
		 * Try to flush all incore state to disk before we examine the
		 * space mappings for the data fork.  Leave accumulated errors
		 * in the mapping for the writer threads to consume.
		 *
		 * On ENOSPC or EIO writeback errors, we continue into the
		 * extent mapping checks because write failures do not
		 * necessarily imply anything about the correctness of the file
		 * metadata.  The metadata and the file data could be on
		 * completely separate devices; a media failure might only
		 * affect a subset of the disk, etc.  We can handle delalloc
		 * extents in the scrubber, so leaving them in memory is fine.
		 */
		error = filemap_fdatawrite(mapping);
		if (!error)
			error = filemap_fdatawait_keep_errors(mapping);
		if (error && (error != -ENOSPC && error != -EIO))
			goto out;
	}

	/* Got the inode, lock it and we're ready to go. */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode */
	return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

struct xchk_bmap_info {
	struct xfs_scrub	*sc;
	xfs_fileoff_t		lastoff;
	bool			is_rt;
	bool			is_shared;
	bool			was_loaded;
	int			whichfork;
};

/* Look for a corresponding rmap for this irec. */
static inline bool
xchk_bmap_get_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno,
	uint64_t		owner,
	struct xfs_rmap_irec	*rmap)
{
	xfs_fileoff_t		offset;
	unsigned int		rflags = 0;
	int			has_rmap;
	int			error;

	if (info->whichfork == XFS_ATTR_FORK)
		rflags |= XFS_RMAP_ATTR_FORK;
	if (irec->br_state == XFS_EXT_UNWRITTEN)
		rflags |= XFS_RMAP_UNWRITTEN;

	/*
	 * CoW staging extents are owned (on disk) by the refcountbt, so
	 * their rmaps do not have offsets.
	 */
	if (info->whichfork == XFS_COW_FORK)
		offset = 0;
	else
		offset = irec->br_startoff;

	/*
	 * If the caller thinks this could be a shared bmbt extent (IOWs,
	 * any data fork extent of a reflink inode) then we have to use the
	 * range rmap lookup to make sure we get the correct owner/offset.
	 */
	if (info->is_shared) {
		error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
				owner, offset, rflags, rmap, &has_rmap);
		if (!xchk_should_check_xref(info->sc, &error,
				&info->sc->sa.rmap_cur))
			return false;
		goto out;
	}

	/*
	 * Otherwise, use the (faster) regular lookup.
	 */
	error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
			offset, rflags, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;
	if (!has_rmap)
		goto out;

	error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
	if (!xchk_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;

out:
	if (!has_rmap)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	return has_rmap;
}

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xchk_bmap_xref_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno)
{
	struct xfs_rmap_irec	rmap;
	unsigned long long	rmap_end;
	uint64_t		owner;

	if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
		return;

	if (info->whichfork == XFS_COW_FORK)
		owner = XFS_RMAP_OWN_COW;
	else
		owner = info->sc->ip->i_ino;

	/* Find the rmap record for this irec. */
	if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
		return;

	/* Check the rmap. */
	rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
	if (rmap.rm_startblock > agbno ||
	    agbno + irec->br_blockcount > rmap_end)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check the logical offsets if applicable.  CoW staging extents
	 * don't track logical offsets since the mappings only exist in
	 * memory.
	 */
	if (info->whichfork != XFS_COW_FORK) {
		rmap_end = (unsigned long long)rmap.rm_offset +
				rmap.rm_blockcount;
		if (rmap.rm_offset > irec->br_startoff ||
		    irec->br_startoff + irec->br_blockcount > rmap_end)
			xchk_fblock_xref_set_corrupt(info->sc,
					info->whichfork, irec->br_startoff);
	}

	if (rmap.rm_owner != owner)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for discrepancies between the unwritten flag in the irec and
	 * the rmap.  Note that the (in-memory) CoW fork distinguishes between
	 * unwritten and written extents, but we don't track that in the rmap
	 * records because the blocks are owned (on-disk) by the refcountbt,
	 * which doesn't track unwritten state.
	 */
	if (owner != XFS_RMAP_OWN_COW &&
	    !!(irec->br_state == XFS_EXT_UNWRITTEN) !=
	    !!(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (!!(info->whichfork == XFS_ATTR_FORK) !=
	    !!(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
}

/* Cross-reference a single rtdev extent record. */
STATIC void
xchk_bmap_rt_iextent_xref(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
			irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xchk_bmap_iextent_xref(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		len;
	int			error;

	agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
	len = irec->br_blockcount;

	error = xchk_ag_init(info->sc, agno, &info->sc->sa);
	if (!xchk_fblock_process_error(info->sc, info->whichfork,
			irec->br_startoff, &error))
		return;

	xchk_xref_is_used_space(info->sc, agbno, len);
	xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
	xchk_bmap_xref_rmap(info, irec, agbno);
	switch (info->whichfork) {
	case XFS_DATA_FORK:
		if (xfs_is_reflink_inode(info->sc->ip))
			break;
		/* fall through */
	case XFS_ATTR_FORK:
		xchk_xref_is_not_shared(info->sc, agbno,
				irec->br_blockcount);
		break;
	case XFS_COW_FORK:
		xchk_xref_is_cow_staging(info->sc, agbno,
				irec->br_blockcount);
		break;
	}

	xchk_ag_free(info->sc, &info->sc->sa);
}

/*
 * Directories and attr forks should never have blocks that can't be addressed
 * by a xfs_dablk_t.
 */
STATIC void
xchk_bmap_dirattr_extent(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		off;

	if (!S_ISDIR(VFS_I(ip)->i_mode) && info->whichfork != XFS_ATTR_FORK)
		return;

	if (!xfs_verify_dablk(mp, irec->br_startoff))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	off = irec->br_startoff + irec->br_blockcount - 1;
	if (!xfs_verify_dablk(mp, off))
		xchk_fblock_set_corrupt(info->sc, info->whichfork, off);
}

/* Scrub a single extent record. */
STATIC int
xchk_bmap_iextent(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	xfs_filblks_t		end;
	int			error = 0;

	/*
	 * Check for out-of-order extents.  This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	xchk_bmap_dirattr_extent(ip, info, irec);

	/* There should never be a "hole" extent in either extent list. */
	if (irec->br_startblock == HOLESTARTBLOCK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for delalloc extents.  We never iterate the ones in the
	 * in-core extent scan, and we should never see these in the bmbt.
	 */
	if (isnullstartblock(irec->br_startblock))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
	if (irec->br_blockcount > MAXEXTLEN)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	end = irec->br_startblock + irec->br_blockcount - 1;
	if (info->is_rt &&
	    (!xfs_verify_rtbno(mp, irec->br_startblock) ||
	     !xfs_verify_rtbno(mp, end)))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (!info->is_rt &&
	    (!xfs_verify_fsbno(mp, irec->br_startblock) ||
	     !xfs_verify_fsbno(mp, end) ||
	     XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
			XFS_FSB_TO_AGNO(mp, end)))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* We don't allow unwritten extents on attr forks. */
	if (irec->br_state == XFS_EXT_UNWRITTEN &&
	    info->whichfork == XFS_ATTR_FORK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	if (info->is_rt)
		xchk_bmap_rt_iextent_xref(ip, info, irec);
	else
		xchk_bmap_iextent_xref(ip, info, irec);

	info->lastoff = irec->br_startoff + irec->br_blockcount;
	return error;
}

/* Scrub a bmbt record. */
STATIC int
xchk_bmapbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	iext_irec;
	struct xfs_iext_cursor	icur;
	struct xchk_bmap_info	*info = bs->private;
	struct xfs_inode	*ip = bs->cur->bc_ino.ip;
	struct xfs_buf		*bp = NULL;
	struct xfs_btree_block	*block;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, info->whichfork);
	uint64_t		owner;
	int			i;

	/*
	 * Check the owners of the btree blocks up to the level below
	 * the root since the verifiers don't do that.
	 */
	if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
	    bs->cur->bc_ptrs[0] == 1) {
		for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
			block = xfs_btree_get_block(bs->cur, i, &bp);
			owner = be64_to_cpu(block->bb_u.l.bb_owner);
			if (owner != ip->i_ino)
				xchk_fblock_set_corrupt(bs->sc,
						info->whichfork, 0);
		}
	}

	/*
	 * Check that the incore extent tree contains an extent that matches
	 * this one exactly.  We validate those cached bmaps later, so we don't
	 * need to check them here.  If the incore extent tree was just loaded
	 * from disk by the scrubber, we assume that its contents match what's
	 * on disk (we still hold the ILOCK) and skip the equivalence check.
	 */
	if (!info->was_loaded)
		return 0;

	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	if (!xfs_iext_lookup_extent(ip, ifp, irec.br_startoff, &icur,
				&iext_irec) ||
	    irec.br_startoff != iext_irec.br_startoff ||
	    irec.br_startblock != iext_irec.br_startblock ||
	    irec.br_blockcount != iext_irec.br_blockcount ||
	    irec.br_state != iext_irec.br_state)
		xchk_fblock_set_corrupt(bs->sc, info->whichfork,
				irec.br_startoff);
	return 0;
}

/* Scan the btree records. */
STATIC int
xchk_bmap_btree(
	struct xfs_scrub	*sc,
	int			whichfork,
	struct xchk_bmap_info	*info)
{
	struct xfs_owner_info	oinfo;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(sc->ip, whichfork);
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_btree_cur	*cur;
	int			error;

	/* Load the incore bmap cache if it's not loaded. */
	info->was_loaded = ifp->if_flags & XFS_IFEXTENTS;
	if (!info->was_loaded) {
		error = xfs_iread_extents(sc->tp, ip, whichfork);
		if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
			goto out;
	}

	/* Check the btree structure. */
	cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
	xfs_btree_del_cursor(cur, error);
out:
	return error;
}

struct xchk_bmap_check_rmap_info {
	struct xfs_scrub	*sc;
	int			whichfork;
	struct xfs_iext_cursor	icur;
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xchk_bmap_check_rmap(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_bmbt_irec		irec;
	struct xchk_bmap_check_rmap_info	*sbcri = priv;
	struct xfs_ifork		*ifp;
	struct xfs_scrub		*sc = sbcri->sc;
	bool				have_map;

	/* Is this even the right fork? */
	if (rec->rm_owner != sc->ip->i_ino)
		return 0;
	if ((sbcri->whichfork == XFS_ATTR_FORK) ^
	    !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;
	if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return 0;

	/* Now look up the bmbt record. */
	ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
	if (!ifp) {
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
		goto out;
	}
	have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
			&sbcri->icur, &irec);
	if (!have_map)
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
	/*
	 * bmap extent record lengths are constrained to 2^21 blocks in length
	 * because of space constraints in the on-disk metadata structure.
	 * However, rmap extent record lengths are constrained only by AG
	 * length, so we have to loop through the bmbt to make sure that the
	 * entire rmap is covered by bmbt records.
	 */
	while (have_map) {
		if (irec.br_startoff != rec->rm_offset)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
				cur->bc_ag.agno, rec->rm_startblock))
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_blockcount > rec->rm_blockcount)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
		rec->rm_startblock += irec.br_blockcount;
		rec->rm_offset += irec.br_blockcount;
		rec->rm_blockcount -= irec.br_blockcount;
		if (rec->rm_blockcount == 0)
			break;
		have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
		if (!have_map)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
	}

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;
	return 0;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_ag_rmaps(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_agnumber_t		agno)
{
	struct xchk_bmap_check_rmap_info	sbcri;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agf;
	int				error;

	error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
	if (error)
		return error;

	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
	if (!cur) {
		error = -ENOMEM;
		goto out_agf;
	}

	sbcri.sc = sc;
	sbcri.whichfork = whichfork;
	error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
	if (error == -ECANCELED)
		error = 0;

	xfs_btree_del_cursor(cur, error);
out_agf:
	xfs_trans_brelse(sc->tp, agf);
	return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_rmaps(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(sc->ip, whichfork);
	xfs_agnumber_t		agno;
	bool			zero_size;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
	    whichfork == XFS_COW_FORK ||
	    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return 0;

	/* Don't support realtime rmap checks yet. */
	if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
		return 0;

	ASSERT(XFS_IFORK_PTR(sc->ip, whichfork) != NULL);

	/*
	 * Only do this for complex maps that are in btree format, or for
	 * situations where we would seem to have a size but zero extents.
	 * The inode repair code can zap broken iforks, which means we have
	 * to flag this bmap as corrupt if there are rmaps that need to be
	 * reattached.
	 */

	if (whichfork == XFS_DATA_FORK)
		zero_size = i_size_read(VFS_I(sc->ip)) == 0;
	else
		zero_size = false;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE &&
	    (zero_size || ifp->if_nextents > 0))
		return 0;

	for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
		error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
	}

	return 0;
}

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xchk_bmap(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	struct xfs_bmbt_irec	irec;
	struct xchk_bmap_info	info = { NULL };
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t		endoff;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	/* Non-existent forks can be ignored. */
	if (!ifp)
		goto out;

	info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
	info.whichfork = whichfork;
	info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
	info.sc = sc;

	switch (whichfork) {
	case XFS_COW_FORK:
		/* No CoW forks on non-reflink inodes/filesystems. */
		if (!xfs_is_reflink_inode(ip)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			goto out;
		}
		break;
	case XFS_ATTR_FORK:
		if (!xfs_sb_version_hasattr(&mp->m_sb) &&
		    !xfs_sb_version_hasattr2(&mp->m_sb))
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		break;
	default:
		ASSERT(whichfork == XFS_DATA_FORK);
		break;
	}

	/* Check the fork values */
	switch (ifp->if_format) {
	case XFS_DINODE_FMT_UUID:
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
		/* No mappings to check. */
		goto out;
	case XFS_DINODE_FMT_EXTENTS:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_COW_FORK) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}

		error = xchk_bmap_btree(sc, whichfork, &info);
		if (error)
			goto out;
		break;
	default:
		xchk_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Find the offset of the last extent in the mapping. */
	error = xfs_bmap_last_offset(ip, &endoff, whichfork);
	if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Scrub extent records. */
	info.lastoff = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error) ||
		    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			goto out;
		if (isnullstartblock(irec.br_startblock))
			continue;
		if (irec.br_startoff >= endoff) {
			xchk_fblock_set_corrupt(sc, whichfork,
					irec.br_startoff);
			goto out;
		}
		error = xchk_bmap_iextent(ip, &info, &irec);
		if (error)
			goto out;
	}

	error = xchk_bmap_check_rmaps(sc, whichfork);
	if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
		goto out;
out:
	return error;
}

/* Scrub an inode's data fork. */
int
xchk_bmap_data(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xchk_bmap_attr(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xchk_bmap_cow(
	struct xfs_scrub	*sc)
{
	if (!xfs_is_reflink_inode(sc->ip))
		return -ENOENT;

	return xchk_bmap(sc, XFS_COW_FORK);
}
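/*
 * For context, the entry points above are reached through the
 * XFS_IOC_SCRUB_METADATA ioctl.  A rough userspace sketch follows (not part
 * of this file; "fd" is assumed to be an open descriptor for the file whose
 * data fork mappings we want checked, and error handling is omitted):
 *
 *	struct xfs_scrub_metadata	sm = {
 *		.sm_type = XFS_SCRUB_TYPE_BMBTD,	// data fork mappings
 *	};
 *
 *	ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm);
 *	if (sm.sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
 *		fprintf(stderr, "data fork mappings are corrupt\n");
 *
 * XFS_SCRUB_TYPE_BMBTA and XFS_SCRUB_TYPE_BMBTC request the attr and CoW
 * fork checks, respectively.
 */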