// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/health.h"

/* Common code for the metadata scrubbers. */

/*
 * Handling operational errors.
 *
 * The *_process_error() family of functions are used to process error return
 * codes from functions called as part of a scrub operation.
 *
 * If there's no error, we return true to tell the caller that it's ok
 * to move on to the next check in its list.
 *
 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 * caller that something bad happened, and we preserve *error so that
 * the caller can return the *error up the stack to userspace.
 *
 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 * OFLAG_CORRUPT in sm_flags and the *error is cleared.  In other words,
 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 * not via return codes.  We return false to tell the caller that
 * something bad happened.  Since the error has been cleared, the caller
 * will (presumably) return that zero and scrubbing will move on to
 * whatever's next.
 *
 * ftrace can be used to record the precise metadata location and the
 * approximate code location of the failed operation.
 */

/* Check for operational errors. */
static bool
__xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		/* fall through */
	default:
		trace_xchk_op_error(sc, agno, bno, *error,
				ret_ip);
		break;
	}
	return false;
}

bool
xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_xref_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
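
/*
 * Usage sketch (illustrative): a scrubber check wraps each libxfs call with
 * xchk_process_error() so that verifier failures are recorded in sm_flags
 * while other operational errors propagate back to userspace.  The cursor
 * and variable names below are hypothetical; only the calling convention
 * comes from this file.
 *
 *	int	has_rec;
 *	int	error;
 *
 *	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &has_rec);
 *	if (!xchk_process_error(sc, agno, agbno, &error))
 *		return error;	// operational error, or corruption noted
 *	// error is now zero here; carry on with the next check
 */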

/* Check for operational errors for a file offset. */
static bool
__xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		/* fall through */
	default:
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		break;
	}
	return false;
}

bool
xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_fblock_xref_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/*
 * Handling scrub corruption/optimization/warning checks.
 *
 * The *_set_{corrupt,preen,warning}() family of functions are used to
 * record the presence of metadata that is incorrect (corrupt), could be
 * optimized somehow (preen), or should be flagged for administrative
 * review but is not incorrect (warn).
 *
 * ftrace can be used to record the precise metadata location and
 * approximate code location of the failed check.
 */

/* Record a block which could be optimized. */
void
xchk_block_set_preen(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_block_preen(sc, bp->b_bn, __return_address);
}

/* Record an inode which could be optimized. */
void
xchk_ino_set_preen(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_ino_preen(sc, ino, __return_address);
}

/* Record something being wrong with the filesystem primary superblock. */
void
xchk_set_corrupt(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fs_error(sc, 0, __return_address);
}

/* Record a corrupt block. */
void
xchk_block_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, bp->b_bn, __return_address);
}

/* Record a corruption while cross-referencing. */
void
xchk_block_xref_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_block_error(sc, bp->b_bn, __return_address);
}
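
/*
 * Usage sketch (illustrative): a header scrubber flags outright breakage
 * with xchk_block_set_corrupt() and legal-but-suboptimal values with
 * xchk_block_set_preen().  Neither helper aborts the scrub; they only set
 * flags for userspace to act on.  The second condition below is a
 * hypothetical placeholder.
 *
 *	if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_agblocks)
 *		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
 *	else if (field_could_be_tidied_up)
 *		xchk_block_set_preen(sc, sc->sa.agf_bp);
 */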

/* Record a corrupt inode. */
void
xchk_ino_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record a corruption while cross-referencing with an inode. */
void
xchk_ino_xref_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record corruption in a block indexed by a file fork. */
void
xchk_fblock_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/* Record a corruption while cross-referencing a fork block. */
void
xchk_fblock_xref_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/*
 * Warn about inodes that need administrative review but are not
 * incorrect.
 */
void
xchk_ino_set_warning(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_ino_warning(sc, ino, __return_address);
}

/* Warn about a block indexed by a file fork that needs review. */
void
xchk_fblock_set_warning(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}

/* Signal an incomplete scrub. */
void
xchk_set_incomplete(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
	trace_xchk_incomplete(sc, __return_address);
}

/*
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */

struct xchk_rmap_ownedby_info {
	const struct xfs_owner_info	*oinfo;
	xfs_filblks_t			*blocks;
};

STATIC int
xchk_count_rmap_ownedby_irec(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xchk_rmap_ownedby_info	*sroi = priv;
	bool				irec_attr;
	bool				oinfo_attr;

	irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
	oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

	if (rec->rm_owner != sroi->oinfo->oi_owner)
		return 0;

	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
		(*sroi->blocks) += rec->rm_blockcount;

	return 0;
}

/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.
 */
int
xchk_count_rmap_ownedby_ag(
	struct xfs_scrub		*sc,
	struct xfs_btree_cur		*cur,
	const struct xfs_owner_info	*oinfo,
	xfs_filblks_t			*blocks)
{
	struct xchk_rmap_ownedby_info	sroi = {
		.oinfo			= oinfo,
		.blocks			= blocks,
	};

	*blocks = 0;
	return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
			&sroi);
}
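
/*
 * Usage sketch: this mirrors how the AGF scrubber cross-checks its btree
 * block counts against the rmapbt, assuming sc->sa.rmap_cur has already
 * been set up; "expected_btree_blocks" is a stand-in for the count derived
 * from the AGF fields.
 *
 *	xfs_filblks_t	blocks;
 *	int		error;
 *
 *	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
 *			&XFS_RMAP_OINFO_AG, &blocks);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 *	if (blocks != expected_btree_blocks)
 *		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 */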

/*
 * AG scrubbing
 *
 * These helpers facilitate locking an allocation group's header
 * buffers, setting up cursors for all btrees that are present, and
 * cleaning everything up once we're through.
 */

/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
	struct xfs_scrub	*sc,
	unsigned int		type)
{
	/* Return all AG header read failures when scanning btrees. */
	if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
		return true;
	/*
	 * If we're scanning a given type of AG header, we only want to
	 * see read failures from that specific header.  We'd like the
	 * other headers to cross-check them, but this isn't required.
	 */
	if (sc->sm->sm_type == type)
		return true;
	return false;
}

/*
 * Grab all the headers for an AG.
 *
 * The headers should be released by xchk_ag_free, but as a fail safe we
 * attach all the buffers we grab to the scrub transaction so they'll all
 * be freed when we cancel it.
 */
int
xchk_ag_read_headers(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xfs_buf		**agi,
	struct xfs_buf		**agf,
	struct xfs_buf		**agfl)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	error = xfs_ialloc_read_agi(mp, sc->tp, agno, agi);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		goto out;

	error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, agf);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		goto out;

	error = xfs_alloc_read_agfl(mp, sc->tp, agno, agfl);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
		goto out;
	error = 0;
out:
	return error;
}

/* Release all the AG btree cursors. */
void
xchk_ag_btcur_free(
	struct xchk_ag		*sa)
{
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)
		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
	if (sa->fino_cur)
		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
	if (sa->ino_cur)
		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
	if (sa->cnt_cur)
		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
	if (sa->bno_cur)
		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

	sa->refc_cur = NULL;
	sa->rmap_cur = NULL;
	sa->fino_cur = NULL;
	sa->ino_cur = NULL;
	sa->bno_cur = NULL;
	sa->cnt_cur = NULL;
}

/* Initialize all the btree cursors for an AG. */
int
xchk_ag_btcur_init(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sa->agno;

	xchk_perag_get(sc->mp, sa);
	if (sa->agf_bp &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_BNO)) {
		/* Set up a bnobt cursor for cross-referencing. */
		sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
				agno, XFS_BTNUM_BNO);
	}

	if (sa->agf_bp &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_CNT)) {
		/* Set up a cntbt cursor for cross-referencing. */
		sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
				agno, XFS_BTNUM_CNT);
	}

	/* Set up an inobt cursor for cross-referencing. */
	if (sa->agi_bp &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) {
		sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
				agno, XFS_BTNUM_INO);
	}

	/* Set up a finobt cursor for cross-referencing. */
	if (sa->agi_bp && xfs_sb_version_hasfinobt(&mp->m_sb) &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) {
		sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
				agno, XFS_BTNUM_FINO);
	}

	/* Set up a rmapbt cursor for cross-referencing. */
	if (sa->agf_bp && xfs_sb_version_hasrmapbt(&mp->m_sb) &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_RMAP)) {
		sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
				agno);
	}

	/* Set up a refcountbt cursor for cross-referencing. */
	if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb) &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_REFC)) {
		sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
				sa->agf_bp, agno);
	}

	return 0;
}

/* Release the AG header context and btree cursors. */
void
xchk_ag_free(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	xchk_ag_btcur_free(sa);
	if (sa->agfl_bp) {
		xfs_trans_brelse(sc->tp, sa->agfl_bp);
		sa->agfl_bp = NULL;
	}
	if (sa->agf_bp) {
		xfs_trans_brelse(sc->tp, sa->agf_bp);
		sa->agf_bp = NULL;
	}
	if (sa->agi_bp) {
		xfs_trans_brelse(sc->tp, sa->agi_bp);
		sa->agi_bp = NULL;
	}
	if (sa->pag) {
		xfs_perag_put(sa->pag);
		sa->pag = NULL;
	}
	sa->agno = NULLAGNUMBER;
}

/*
 * For scrub, grab the AGI and the AGF headers, in that order.  Locking
 * order requires us to get the AGI before the AGF.  We use the
 * transaction to avoid deadlocking on crosslinked metadata buffers;
 * either the caller passes one in (bmap scrub) or we have to create a
 * transaction ourselves.
 */
int
xchk_ag_init(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error;

	sa->agno = agno;
	error = xchk_ag_read_headers(sc, agno, &sa->agi_bp,
			&sa->agf_bp, &sa->agfl_bp);
	if (error)
		return error;

	return xchk_ag_btcur_init(sc, sa);
}

/*
 * Grab the per-ag structure if we haven't already gotten it.  Teardown of the
 * xchk_ag will release it for us.
 */
void
xchk_perag_get(
	struct xfs_mount	*mp,
	struct xchk_ag		*sa)
{
	if (!sa->pag)
		sa->pag = xfs_perag_get(mp, sa->agno);
}

/* Per-scrubber setup functions */

/*
 * Grab an empty transaction so that we can re-grab locked buffers if
 * one of our btrees turns out to be cyclic.
 *
 * If we're going to repair something, we need to ask for the largest possible
 * log reservation so that we can handle the worst case scenario for metadata
 * updates while rebuilding a metadata item.  We also need to reserve as many
 * blocks in the head transaction as we think we're going to need to rebuild
 * the metadata object.
 */
int
xchk_trans_alloc(
	struct xfs_scrub	*sc,
	uint			resblks)
{
	if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
		return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
				resblks, 0, 0, &sc->tp);

	return xfs_trans_alloc_empty(sc->mp, &sc->tp);
}

/* Set us up with a transaction and an empty context. */
int
xchk_setup_fs(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	uint			resblks;

	resblks = xrep_calc_ag_resblks(sc);
	return xchk_trans_alloc(sc, resblks);
}

/* Set us up with AG headers and btree cursors. */
int
xchk_setup_ag_btree(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip,
	bool			force_log)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	/*
	 * If the caller asks us to checkpoint the log, do so.  This
	 * expensive operation should be performed infrequently and only
	 * as a last resort.  Any caller that sets force_log should
	 * document why they need to do so.
	 */
	if (force_log) {
		error = xchk_checkpoint_log(mp);
		if (error)
			return error;
	}

	error = xchk_setup_fs(sc, ip);
	if (error)
		return error;

	return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}

/* Push everything out of the log onto disk. */
int
xchk_checkpoint_log(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all_sync(mp->m_ail);
	return 0;
}

/*
 * Given an inode and the scrub control structure, grab either the
 * inode referenced in the control structure or the inode passed in.
 * The inode is not locked.
 */
int
xchk_get_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip_in)
{
	struct xfs_imap		imap;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = NULL;
	int			error;

	/* We want to scan the inode we already had opened. */
	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
		sc->ip = ip_in;
		return 0;
	}

	/* Look up the inode, see if the generation number matches. */
	if (xfs_internal_inum(mp, sc->sm->sm_ino))
		return -ENOENT;
	error = xfs_iget(mp, NULL, sc->sm->sm_ino,
			XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip);
	switch (error) {
	case -ENOENT:
		/* Inode doesn't exist, just bail out. */
		return error;
	case 0:
		/* Got an inode, continue. */
		break;
	case -EINVAL:
		/*
		 * -EINVAL with IGET_UNTRUSTED could mean one of several
		 * things: userspace gave us an inode number that doesn't
		 * correspond to fs space, or doesn't have an inobt entry;
		 * or it could simply mean that the inode buffer failed the
		 * read verifiers.
		 *
		 * Try just the inode mapping lookup -- if it succeeds, then
		 * the inode buffer verifier failed and something needs fixing.
		 * Otherwise, we really couldn't find it so tell userspace
		 * that it no longer exists.
		 */
		error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap,
				XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE);
		if (error)
			return -ENOENT;
		error = -EFSCORRUPTED;
		/* fall through */
	default:
		trace_xchk_op_error(sc,
				XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
				XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
				error, __return_address);
		return error;
	}
	if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
		xfs_irele(ip);
		return -ENOENT;
	}

	sc->ip = ip;
	return 0;
}

/* Set us up to scrub a file's contents. */
int
xchk_setup_inode_contents(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip,
	unsigned int		resblks)
{
	int			error;

	error = xchk_get_inode(sc, ip);
	if (error)
		return error;

	/* Got the inode, lock it and we're ready to go. */
	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
	error = xchk_trans_alloc(sc, resblks);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}

/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 */
bool
xchk_should_check_xref(
	struct xfs_scrub	*sc,
	int			*error,
	struct xfs_btree_cur	**curpp)
{
	/* No point in xref if we already know we're corrupt. */
	if (xchk_skip_xref(sc->sm))
		return false;

	if (*error == 0)
		return true;

	if (curpp) {
		/* If we've already given up on xref, just bail out. */
		if (!*curpp)
			return false;

		/* xref error, delete cursor and bail out. */
		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
		*curpp = NULL;
	}

	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
	trace_xchk_xref_error(sc, *error, __return_address);

	/*
	 * Errors encountered during cross-referencing with another
	 * data structure should not cause this scrubber to abort.
	 */
	*error = 0;
	return false;
}

/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xchk_buffer_recheck(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	if (bp->b_ops == NULL) {
		xchk_block_set_corrupt(sc, bp);
		return;
	}
	if (bp->b_ops->verify_struct == NULL) {
		xchk_set_incomplete(sc);
		return;
	}
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return;
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, bp->b_bn, fa);
}

/*
 * Scrub the attr/data forks of a metadata inode.  The metadata inode must be
 * pointed to by sc->ip and the ILOCK must be held.
 */
int
xchk_metadata_inode_forks(
	struct xfs_scrub	*sc)
{
	__u32			smtype;
	bool			shared;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/* Metadata inodes don't live on the rt device. */
	if (sc->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They should never participate in reflink. */
	if (xfs_is_reflink_inode(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They also should never have extended attributes. */
	if (xfs_inode_hasattr(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* Invoke the data fork scrubber. */
	smtype = sc->sm->sm_type;
	sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD;
	error = xchk_bmap_data(sc);
	sc->sm->sm_type = smtype;
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Look for incorrect shared blocks. */
	if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
		error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
				&shared);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
				&error))
			return error;
		if (shared)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
	}

	return error;
}
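
/*
 * Usage sketch (illustrative) for xchk_should_check_xref() above: a
 * cross-referencing helper queries another AG btree and lets the predicate
 * decide whether the answer can be trusted, tearing down the cursor and
 * setting XFAIL if not.  The free-space query here is just one example of
 * such a query.
 *
 *	bool	is_freesp;
 *	int	error;
 *
 *	if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm))
 *		return;
 *	error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
 *		return;
 *	if (is_freesp)
 *		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 */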

/*
 * Try to lock an inode in violation of the usual locking order rules.  For
 * example, trying to get the IOLOCK while in transaction context, or just
 * plain breaking AG-order or inode-order inode locking rules.  Either way,
 * the only way to avoid an ABBA deadlock is to use trylock and back off if
 * we can't.
 */
int
xchk_ilock_inverted(
	struct xfs_inode	*ip,
	uint			lock_mode)
{
	int			i;

	for (i = 0; i < 20; i++) {
		if (xfs_ilock_nowait(ip, lock_mode))
			return 0;
		delay(1);
	}
	return -EDEADLOCK;
}

/* Pause background reaping of resources. */
void
xchk_stop_reaping(
	struct xfs_scrub	*sc)
{
	sc->flags |= XCHK_REAPING_DISABLED;
	xfs_blockgc_stop(sc->mp);
}

/* Restart background reaping of resources. */
void
xchk_start_reaping(
	struct xfs_scrub	*sc)
{
	xfs_blockgc_start(sc->mp);
	sc->flags &= ~XCHK_REAPING_DISABLED;
}
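
/*
 * Usage sketch (illustrative) for xchk_ilock_inverted(): a scrubber that
 * needs a second inode's lock out of order takes it with the trylock
 * helper and passes any -EDEADLOCK back up, where the *_process_error()
 * helpers trace it as a deadlock retry so the scrub can be attempted
 * again.  'dp' stands in for whichever other inode the check needs.
 *
 *	error = xchk_ilock_inverted(dp, XFS_IOLOCK_SHARED);
 *	if (error)
 *		goto out_rele;
 */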