1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright (C) 2018 Oracle. All Rights Reserved. 4 * Author: Darrick J. Wong <darrick.wong@oracle.com> 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_trans_resv.h" 11 #include "xfs_mount.h" 12 #include "xfs_defer.h" 13 #include "xfs_btree.h" 14 #include "xfs_bit.h" 15 #include "xfs_log_format.h" 16 #include "xfs_trans.h" 17 #include "xfs_sb.h" 18 #include "xfs_inode.h" 19 #include "xfs_icache.h" 20 #include "xfs_alloc.h" 21 #include "xfs_alloc_btree.h" 22 #include "xfs_ialloc.h" 23 #include "xfs_ialloc_btree.h" 24 #include "xfs_rmap.h" 25 #include "xfs_rmap_btree.h" 26 #include "xfs_refcount.h" 27 #include "xfs_refcount_btree.h" 28 #include "xfs_extent_busy.h" 29 #include "xfs_ag_resv.h" 30 #include "xfs_trans_space.h" 31 #include "xfs_quota.h" 32 #include "scrub/xfs_scrub.h" 33 #include "scrub/scrub.h" 34 #include "scrub/common.h" 35 #include "scrub/trace.h" 36 #include "scrub/repair.h" 37 38 /* 39 * Attempt to repair some metadata, if the metadata is corrupt and userspace 40 * told us to fix it. This function returns -EAGAIN to mean "re-run scrub", 41 * and will set *fixed to true if it thinks it repaired anything. 42 */ 43 int 44 xfs_repair_attempt( 45 struct xfs_inode *ip, 46 struct xfs_scrub_context *sc, 47 bool *fixed) 48 { 49 int error = 0; 50 51 trace_xfs_repair_attempt(ip, sc->sm, error); 52 53 xfs_scrub_ag_btcur_free(&sc->sa); 54 55 /* Repair whatever's broken. */ 56 ASSERT(sc->ops->repair); 57 error = sc->ops->repair(sc); 58 trace_xfs_repair_done(ip, sc->sm, error); 59 switch (error) { 60 case 0: 61 /* 62 * Repair succeeded. Commit the fixes and perform a second 63 * scrub so that we can tell userspace if we fixed the problem. 64 */ 65 sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT; 66 *fixed = true; 67 return -EAGAIN; 68 case -EDEADLOCK: 69 case -EAGAIN: 70 /* Tell the caller to try again having grabbed all the locks. */ 71 if (!sc->try_harder) { 72 sc->try_harder = true; 73 return -EAGAIN; 74 } 75 /* 76 * We tried harder but still couldn't grab all the resources 77 * we needed to fix it. The corruption has not been fixed, 78 * so report back to userspace. 79 */ 80 return -EFSCORRUPTED; 81 default: 82 return error; 83 } 84 } 85 86 /* 87 * Complain about unfixable problems in the filesystem. We don't log 88 * corruptions when IFLAG_REPAIR wasn't set on the assumption that the driver 89 * program is xfs_scrub, which will call back with IFLAG_REPAIR set if the 90 * administrator isn't running xfs_scrub in no-repairs mode. 91 * 92 * Use this helper function because _ratelimited silently declares a static 93 * structure to track rate limiting information. 94 */ 95 void 96 xfs_repair_failure( 97 struct xfs_mount *mp) 98 { 99 xfs_alert_ratelimited(mp, 100 "Corruption not fixed during online repair. Unmount and run xfs_repair."); 101 } 102 103 /* 104 * Repair probe -- userspace uses this to probe if we're willing to repair a 105 * given mountpoint. 106 */ 107 int 108 xfs_repair_probe( 109 struct xfs_scrub_context *sc) 110 { 111 int error = 0; 112 113 if (xfs_scrub_should_terminate(sc, &error)) 114 return error; 115 116 return 0; 117 } 118 119 /* 120 * Roll a transaction, keeping the AG headers locked and reinitializing 121 * the btree cursors. 122 */ 123 int 124 xfs_repair_roll_ag_trans( 125 struct xfs_scrub_context *sc) 126 { 127 int error; 128 129 /* Keep the AG header buffers locked so we can keep going. */ 130 xfs_trans_bhold(sc->tp, sc->sa.agi_bp); 131 xfs_trans_bhold(sc->tp, sc->sa.agf_bp); 132 xfs_trans_bhold(sc->tp, sc->sa.agfl_bp); 133 134 /* Roll the transaction. */ 135 error = xfs_trans_roll(&sc->tp); 136 if (error) 137 goto out_release; 138 139 /* Join AG headers to the new transaction. */ 140 xfs_trans_bjoin(sc->tp, sc->sa.agi_bp); 141 xfs_trans_bjoin(sc->tp, sc->sa.agf_bp); 142 xfs_trans_bjoin(sc->tp, sc->sa.agfl_bp); 143 144 return 0; 145 146 out_release: 147 /* 148 * Rolling failed, so release the hold on the buffers. The 149 * buffers will be released during teardown on our way out 150 * of the kernel. 151 */ 152 xfs_trans_bhold_release(sc->tp, sc->sa.agi_bp); 153 xfs_trans_bhold_release(sc->tp, sc->sa.agf_bp); 154 xfs_trans_bhold_release(sc->tp, sc->sa.agfl_bp); 155 156 return error; 157 } 158 159 /* 160 * Does the given AG have enough space to rebuild a btree? Neither AG 161 * reservation can be critical, and we must have enough space (factoring 162 * in AG reservations) to construct a whole btree. 163 */ 164 bool 165 xfs_repair_ag_has_space( 166 struct xfs_perag *pag, 167 xfs_extlen_t nr_blocks, 168 enum xfs_ag_resv_type type) 169 { 170 return !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) && 171 !xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) && 172 pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks; 173 } 174 175 /* 176 * Figure out how many blocks to reserve for an AG repair. We calculate the 177 * worst case estimate for the number of blocks we'd need to rebuild one of 178 * any type of per-AG btree. 179 */ 180 xfs_extlen_t 181 xfs_repair_calc_ag_resblks( 182 struct xfs_scrub_context *sc) 183 { 184 struct xfs_mount *mp = sc->mp; 185 struct xfs_scrub_metadata *sm = sc->sm; 186 struct xfs_perag *pag; 187 struct xfs_buf *bp; 188 xfs_agino_t icount = 0; 189 xfs_extlen_t aglen = 0; 190 xfs_extlen_t usedlen; 191 xfs_extlen_t freelen; 192 xfs_extlen_t bnobt_sz; 193 xfs_extlen_t inobt_sz; 194 xfs_extlen_t rmapbt_sz; 195 xfs_extlen_t refcbt_sz; 196 int error; 197 198 if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)) 199 return 0; 200 201 /* Use in-core counters if possible. */ 202 pag = xfs_perag_get(mp, sm->sm_agno); 203 if (pag->pagi_init) 204 icount = pag->pagi_count; 205 206 /* 207 * Otherwise try to get the actual counters from disk; if not, make 208 * some worst case assumptions. 209 */ 210 if (icount == 0) { 211 error = xfs_ialloc_read_agi(mp, NULL, sm->sm_agno, &bp); 212 if (error) { 213 icount = mp->m_sb.sb_agblocks / mp->m_sb.sb_inopblock; 214 } else { 215 icount = pag->pagi_count; 216 xfs_buf_relse(bp); 217 } 218 } 219 220 /* Now grab the block counters from the AGF. */ 221 error = xfs_alloc_read_agf(mp, NULL, sm->sm_agno, 0, &bp); 222 if (error) { 223 aglen = mp->m_sb.sb_agblocks; 224 freelen = aglen; 225 usedlen = aglen; 226 } else { 227 aglen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_length); 228 freelen = pag->pagf_freeblks; 229 usedlen = aglen - freelen; 230 xfs_buf_relse(bp); 231 } 232 xfs_perag_put(pag); 233 234 trace_xfs_repair_calc_ag_resblks(mp, sm->sm_agno, icount, aglen, 235 freelen, usedlen); 236 237 /* 238 * Figure out how many blocks we'd need worst case to rebuild 239 * each type of btree. Note that we can only rebuild the 240 * bnobt/cntbt or inobt/finobt as pairs. 241 */ 242 bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen); 243 if (xfs_sb_version_hassparseinodes(&mp->m_sb)) 244 inobt_sz = xfs_iallocbt_calc_size(mp, icount / 245 XFS_INODES_PER_HOLEMASK_BIT); 246 else 247 inobt_sz = xfs_iallocbt_calc_size(mp, icount / 248 XFS_INODES_PER_CHUNK); 249 if (xfs_sb_version_hasfinobt(&mp->m_sb)) 250 inobt_sz *= 2; 251 if (xfs_sb_version_hasreflink(&mp->m_sb)) 252 refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen); 253 else 254 refcbt_sz = 0; 255 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) { 256 /* 257 * Guess how many blocks we need to rebuild the rmapbt. 258 * For non-reflink filesystems we can't have more records than 259 * used blocks. However, with reflink it's possible to have 260 * more than one rmap record per AG block. We don't know how 261 * many rmaps there could be in the AG, so we start off with 262 * what we hope is an generous over-estimation. 263 */ 264 if (xfs_sb_version_hasreflink(&mp->m_sb)) 265 rmapbt_sz = xfs_rmapbt_calc_size(mp, 266 (unsigned long long)aglen * 2); 267 else 268 rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen); 269 } else { 270 rmapbt_sz = 0; 271 } 272 273 trace_xfs_repair_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz, 274 inobt_sz, rmapbt_sz, refcbt_sz); 275 276 return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz)); 277 } 278 279 /* Allocate a block in an AG. */ 280 int 281 xfs_repair_alloc_ag_block( 282 struct xfs_scrub_context *sc, 283 struct xfs_owner_info *oinfo, 284 xfs_fsblock_t *fsbno, 285 enum xfs_ag_resv_type resv) 286 { 287 struct xfs_alloc_arg args = {0}; 288 xfs_agblock_t bno; 289 int error; 290 291 switch (resv) { 292 case XFS_AG_RESV_AGFL: 293 case XFS_AG_RESV_RMAPBT: 294 error = xfs_alloc_get_freelist(sc->tp, sc->sa.agf_bp, &bno, 1); 295 if (error) 296 return error; 297 if (bno == NULLAGBLOCK) 298 return -ENOSPC; 299 xfs_extent_busy_reuse(sc->mp, sc->sa.agno, bno, 300 1, false); 301 *fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.agno, bno); 302 if (resv == XFS_AG_RESV_RMAPBT) 303 xfs_ag_resv_rmapbt_alloc(sc->mp, sc->sa.agno); 304 return 0; 305 default: 306 break; 307 } 308 309 args.tp = sc->tp; 310 args.mp = sc->mp; 311 args.oinfo = *oinfo; 312 args.fsbno = XFS_AGB_TO_FSB(args.mp, sc->sa.agno, 0); 313 args.minlen = 1; 314 args.maxlen = 1; 315 args.prod = 1; 316 args.type = XFS_ALLOCTYPE_THIS_AG; 317 args.resv = resv; 318 319 error = xfs_alloc_vextent(&args); 320 if (error) 321 return error; 322 if (args.fsbno == NULLFSBLOCK) 323 return -ENOSPC; 324 ASSERT(args.len == 1); 325 *fsbno = args.fsbno; 326 327 return 0; 328 } 329 330 /* Initialize a new AG btree root block with zero entries. */ 331 int 332 xfs_repair_init_btblock( 333 struct xfs_scrub_context *sc, 334 xfs_fsblock_t fsb, 335 struct xfs_buf **bpp, 336 xfs_btnum_t btnum, 337 const struct xfs_buf_ops *ops) 338 { 339 struct xfs_trans *tp = sc->tp; 340 struct xfs_mount *mp = sc->mp; 341 struct xfs_buf *bp; 342 343 trace_xfs_repair_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb), 344 XFS_FSB_TO_AGBNO(mp, fsb), btnum); 345 346 ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.agno); 347 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, fsb), 348 XFS_FSB_TO_BB(mp, 1), 0); 349 xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); 350 xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.agno, 0); 351 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF); 352 xfs_trans_log_buf(tp, bp, 0, bp->b_length); 353 bp->b_ops = ops; 354 *bpp = bp; 355 356 return 0; 357 } 358 359 /* 360 * Reconstructing per-AG Btrees 361 * 362 * When a space btree is corrupt, we don't bother trying to fix it. Instead, 363 * we scan secondary space metadata to derive the records that should be in 364 * the damaged btree, initialize a fresh btree root, and insert the records. 365 * Note that for rebuilding the rmapbt we scan all the primary data to 366 * generate the new records. 367 * 368 * However, that leaves the matter of removing all the metadata describing the 369 * old broken structure. For primary metadata we use the rmap data to collect 370 * every extent with a matching rmap owner (exlist); we then iterate all other 371 * metadata structures with the same rmap owner to collect the extents that 372 * cannot be removed (sublist). We then subtract sublist from exlist to 373 * derive the blocks that were used by the old btree. These blocks can be 374 * reaped. 375 * 376 * For rmapbt reconstructions we must use different tactics for extent 377 * collection. First we iterate all primary metadata (this excludes the old 378 * rmapbt, obviously) to generate new rmap records. The gaps in the rmap 379 * records are collected as exlist. The bnobt records are collected as 380 * sublist. As with the other btrees we subtract sublist from exlist, and the 381 * result (since the rmapbt lives in the free space) are the blocks from the 382 * old rmapbt. 383 */ 384 385 /* Collect a dead btree extent for later disposal. */ 386 int 387 xfs_repair_collect_btree_extent( 388 struct xfs_scrub_context *sc, 389 struct xfs_repair_extent_list *exlist, 390 xfs_fsblock_t fsbno, 391 xfs_extlen_t len) 392 { 393 struct xfs_repair_extent *rex; 394 395 trace_xfs_repair_collect_btree_extent(sc->mp, 396 XFS_FSB_TO_AGNO(sc->mp, fsbno), 397 XFS_FSB_TO_AGBNO(sc->mp, fsbno), len); 398 399 rex = kmem_alloc(sizeof(struct xfs_repair_extent), KM_MAYFAIL); 400 if (!rex) 401 return -ENOMEM; 402 403 INIT_LIST_HEAD(&rex->list); 404 rex->fsbno = fsbno; 405 rex->len = len; 406 list_add_tail(&rex->list, &exlist->list); 407 408 return 0; 409 } 410 411 /* 412 * An error happened during the rebuild so the transaction will be cancelled. 413 * The fs will shut down, and the administrator has to unmount and run repair. 414 * Therefore, free all the memory associated with the list so we can die. 415 */ 416 void 417 xfs_repair_cancel_btree_extents( 418 struct xfs_scrub_context *sc, 419 struct xfs_repair_extent_list *exlist) 420 { 421 struct xfs_repair_extent *rex; 422 struct xfs_repair_extent *n; 423 424 for_each_xfs_repair_extent_safe(rex, n, exlist) { 425 list_del(&rex->list); 426 kmem_free(rex); 427 } 428 } 429 430 /* Compare two btree extents. */ 431 static int 432 xfs_repair_btree_extent_cmp( 433 void *priv, 434 struct list_head *a, 435 struct list_head *b) 436 { 437 struct xfs_repair_extent *ap; 438 struct xfs_repair_extent *bp; 439 440 ap = container_of(a, struct xfs_repair_extent, list); 441 bp = container_of(b, struct xfs_repair_extent, list); 442 443 if (ap->fsbno > bp->fsbno) 444 return 1; 445 if (ap->fsbno < bp->fsbno) 446 return -1; 447 return 0; 448 } 449 450 /* 451 * Remove all the blocks mentioned in @sublist from the extents in @exlist. 452 * 453 * The intent is that callers will iterate the rmapbt for all of its records 454 * for a given owner to generate @exlist; and iterate all the blocks of the 455 * metadata structures that are not being rebuilt and have the same rmapbt 456 * owner to generate @sublist. This routine subtracts all the extents 457 * mentioned in sublist from all the extents linked in @exlist, which leaves 458 * @exlist as the list of blocks that are not accounted for, which we assume 459 * are the dead blocks of the old metadata structure. The blocks mentioned in 460 * @exlist can be reaped. 461 */ 462 #define LEFT_ALIGNED (1 << 0) 463 #define RIGHT_ALIGNED (1 << 1) 464 int 465 xfs_repair_subtract_extents( 466 struct xfs_scrub_context *sc, 467 struct xfs_repair_extent_list *exlist, 468 struct xfs_repair_extent_list *sublist) 469 { 470 struct list_head *lp; 471 struct xfs_repair_extent *ex; 472 struct xfs_repair_extent *newex; 473 struct xfs_repair_extent *subex; 474 xfs_fsblock_t sub_fsb; 475 xfs_extlen_t sub_len; 476 int state; 477 int error = 0; 478 479 if (list_empty(&exlist->list) || list_empty(&sublist->list)) 480 return 0; 481 ASSERT(!list_empty(&sublist->list)); 482 483 list_sort(NULL, &exlist->list, xfs_repair_btree_extent_cmp); 484 list_sort(NULL, &sublist->list, xfs_repair_btree_extent_cmp); 485 486 /* 487 * Now that we've sorted both lists, we iterate exlist once, rolling 488 * forward through sublist and/or exlist as necessary until we find an 489 * overlap or reach the end of either list. We do not reset lp to the 490 * head of exlist nor do we reset subex to the head of sublist. The 491 * list traversal is similar to merge sort, but we're deleting 492 * instead. In this manner we avoid O(n^2) operations. 493 */ 494 subex = list_first_entry(&sublist->list, struct xfs_repair_extent, 495 list); 496 lp = exlist->list.next; 497 while (lp != &exlist->list) { 498 ex = list_entry(lp, struct xfs_repair_extent, list); 499 500 /* 501 * Advance subex and/or ex until we find a pair that 502 * intersect or we run out of extents. 503 */ 504 while (subex->fsbno + subex->len <= ex->fsbno) { 505 if (list_is_last(&subex->list, &sublist->list)) 506 goto out; 507 subex = list_next_entry(subex, list); 508 } 509 if (subex->fsbno >= ex->fsbno + ex->len) { 510 lp = lp->next; 511 continue; 512 } 513 514 /* trim subex to fit the extent we have */ 515 sub_fsb = subex->fsbno; 516 sub_len = subex->len; 517 if (subex->fsbno < ex->fsbno) { 518 sub_len -= ex->fsbno - subex->fsbno; 519 sub_fsb = ex->fsbno; 520 } 521 if (sub_len > ex->len) 522 sub_len = ex->len; 523 524 state = 0; 525 if (sub_fsb == ex->fsbno) 526 state |= LEFT_ALIGNED; 527 if (sub_fsb + sub_len == ex->fsbno + ex->len) 528 state |= RIGHT_ALIGNED; 529 switch (state) { 530 case LEFT_ALIGNED: 531 /* Coincides with only the left. */ 532 ex->fsbno += sub_len; 533 ex->len -= sub_len; 534 break; 535 case RIGHT_ALIGNED: 536 /* Coincides with only the right. */ 537 ex->len -= sub_len; 538 lp = lp->next; 539 break; 540 case LEFT_ALIGNED | RIGHT_ALIGNED: 541 /* Total overlap, just delete ex. */ 542 lp = lp->next; 543 list_del(&ex->list); 544 kmem_free(ex); 545 break; 546 case 0: 547 /* 548 * Deleting from the middle: add the new right extent 549 * and then shrink the left extent. 550 */ 551 newex = kmem_alloc(sizeof(struct xfs_repair_extent), 552 KM_MAYFAIL); 553 if (!newex) { 554 error = -ENOMEM; 555 goto out; 556 } 557 INIT_LIST_HEAD(&newex->list); 558 newex->fsbno = sub_fsb + sub_len; 559 newex->len = ex->fsbno + ex->len - newex->fsbno; 560 list_add(&newex->list, &ex->list); 561 ex->len = sub_fsb - ex->fsbno; 562 lp = lp->next; 563 break; 564 default: 565 ASSERT(0); 566 break; 567 } 568 } 569 570 out: 571 return error; 572 } 573 #undef LEFT_ALIGNED 574 #undef RIGHT_ALIGNED 575 576 /* 577 * Disposal of Blocks from Old per-AG Btrees 578 * 579 * Now that we've constructed a new btree to replace the damaged one, we want 580 * to dispose of the blocks that (we think) the old btree was using. 581 * Previously, we used the rmapbt to collect the extents (exlist) with the 582 * rmap owner corresponding to the tree we rebuilt, collected extents for any 583 * blocks with the same rmap owner that are owned by another data structure 584 * (sublist), and subtracted sublist from exlist. In theory the extents 585 * remaining in exlist are the old btree's blocks. 586 * 587 * Unfortunately, it's possible that the btree was crosslinked with other 588 * blocks on disk. The rmap data can tell us if there are multiple owners, so 589 * if the rmapbt says there is an owner of this block other than @oinfo, then 590 * the block is crosslinked. Remove the reverse mapping and continue. 591 * 592 * If there is one rmap record, we can free the block, which removes the 593 * reverse mapping but doesn't add the block to the free space. Our repair 594 * strategy is to hope the other metadata objects crosslinked on this block 595 * will be rebuilt (atop different blocks), thereby removing all the cross 596 * links. 597 * 598 * If there are no rmap records at all, we also free the block. If the btree 599 * being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't 600 * supposed to be a rmap record and everything is ok. For other btrees there 601 * had to have been an rmap entry for the block to have ended up on @exlist, 602 * so if it's gone now there's something wrong and the fs will shut down. 603 * 604 * Note: If there are multiple rmap records with only the same rmap owner as 605 * the btree we're trying to rebuild and the block is indeed owned by another 606 * data structure with the same rmap owner, then the block will be in sublist 607 * and therefore doesn't need disposal. If there are multiple rmap records 608 * with only the same rmap owner but the block is not owned by something with 609 * the same rmap owner, the block will be freed. 610 * 611 * The caller is responsible for locking the AG headers for the entire rebuild 612 * operation so that nothing else can sneak in and change the AG state while 613 * we're not looking. We also assume that the caller already invalidated any 614 * buffers associated with @exlist. 615 */ 616 617 /* 618 * Invalidate buffers for per-AG btree blocks we're dumping. This function 619 * is not intended for use with file data repairs; we have bunmapi for that. 620 */ 621 int 622 xfs_repair_invalidate_blocks( 623 struct xfs_scrub_context *sc, 624 struct xfs_repair_extent_list *exlist) 625 { 626 struct xfs_repair_extent *rex; 627 struct xfs_repair_extent *n; 628 struct xfs_buf *bp; 629 xfs_fsblock_t fsbno; 630 xfs_agblock_t i; 631 632 /* 633 * For each block in each extent, see if there's an incore buffer for 634 * exactly that block; if so, invalidate it. The buffer cache only 635 * lets us look for one buffer at a time, so we have to look one block 636 * at a time. Avoid invalidating AG headers and post-EOFS blocks 637 * because we never own those; and if we can't TRYLOCK the buffer we 638 * assume it's owned by someone else. 639 */ 640 for_each_xfs_repair_extent_safe(rex, n, exlist) { 641 for (fsbno = rex->fsbno, i = rex->len; i > 0; fsbno++, i--) { 642 /* Skip AG headers and post-EOFS blocks */ 643 if (!xfs_verify_fsbno(sc->mp, fsbno)) 644 continue; 645 bp = xfs_buf_incore(sc->mp->m_ddev_targp, 646 XFS_FSB_TO_DADDR(sc->mp, fsbno), 647 XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK); 648 if (bp) { 649 xfs_trans_bjoin(sc->tp, bp); 650 xfs_trans_binval(sc->tp, bp); 651 } 652 } 653 } 654 655 return 0; 656 } 657 658 /* Ensure the freelist is the correct size. */ 659 int 660 xfs_repair_fix_freelist( 661 struct xfs_scrub_context *sc, 662 bool can_shrink) 663 { 664 struct xfs_alloc_arg args = {0}; 665 666 args.mp = sc->mp; 667 args.tp = sc->tp; 668 args.agno = sc->sa.agno; 669 args.alignment = 1; 670 args.pag = sc->sa.pag; 671 672 return xfs_alloc_fix_freelist(&args, 673 can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK); 674 } 675 676 /* 677 * Put a block back on the AGFL. 678 */ 679 STATIC int 680 xfs_repair_put_freelist( 681 struct xfs_scrub_context *sc, 682 xfs_agblock_t agbno) 683 { 684 struct xfs_owner_info oinfo; 685 int error; 686 687 /* Make sure there's space on the freelist. */ 688 error = xfs_repair_fix_freelist(sc, true); 689 if (error) 690 return error; 691 692 /* 693 * Since we're "freeing" a lost block onto the AGFL, we have to 694 * create an rmap for the block prior to merging it or else other 695 * parts will break. 696 */ 697 xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG); 698 error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.agno, agbno, 1, 699 &oinfo); 700 if (error) 701 return error; 702 703 /* Put the block on the AGFL. */ 704 error = xfs_alloc_put_freelist(sc->tp, sc->sa.agf_bp, sc->sa.agfl_bp, 705 agbno, 0); 706 if (error) 707 return error; 708 xfs_extent_busy_insert(sc->tp, sc->sa.agno, agbno, 1, 709 XFS_EXTENT_BUSY_SKIP_DISCARD); 710 711 return 0; 712 } 713 714 /* Dispose of a single metadata block. */ 715 STATIC int 716 xfs_repair_dispose_btree_block( 717 struct xfs_scrub_context *sc, 718 xfs_fsblock_t fsbno, 719 struct xfs_owner_info *oinfo, 720 enum xfs_ag_resv_type resv) 721 { 722 struct xfs_btree_cur *cur; 723 struct xfs_buf *agf_bp = NULL; 724 xfs_agnumber_t agno; 725 xfs_agblock_t agbno; 726 bool has_other_rmap; 727 int error; 728 729 agno = XFS_FSB_TO_AGNO(sc->mp, fsbno); 730 agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); 731 732 /* 733 * If we are repairing per-inode metadata, we need to read in the AGF 734 * buffer. Otherwise, we're repairing a per-AG structure, so reuse 735 * the AGF buffer that the setup functions already grabbed. 736 */ 737 if (sc->ip) { 738 error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf_bp); 739 if (error) 740 return error; 741 if (!agf_bp) 742 return -ENOMEM; 743 } else { 744 agf_bp = sc->sa.agf_bp; 745 } 746 cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, agno); 747 748 /* Can we find any other rmappings? */ 749 error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap); 750 if (error) 751 goto out_cur; 752 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 753 754 /* 755 * If there are other rmappings, this block is cross linked and must 756 * not be freed. Remove the reverse mapping and move on. Otherwise, 757 * we were the only owner of the block, so free the extent, which will 758 * also remove the rmap. 759 * 760 * XXX: XFS doesn't support detecting the case where a single block 761 * metadata structure is crosslinked with a multi-block structure 762 * because the buffer cache doesn't detect aliasing problems, so we 763 * can't fix 100% of crosslinking problems (yet). The verifiers will 764 * blow on writeout, the filesystem will shut down, and the admin gets 765 * to run xfs_repair. 766 */ 767 if (has_other_rmap) 768 error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1, oinfo); 769 else if (resv == XFS_AG_RESV_AGFL) 770 error = xfs_repair_put_freelist(sc, agbno); 771 else 772 error = xfs_free_extent(sc->tp, fsbno, 1, oinfo, resv); 773 if (agf_bp != sc->sa.agf_bp) 774 xfs_trans_brelse(sc->tp, agf_bp); 775 if (error) 776 return error; 777 778 if (sc->ip) 779 return xfs_trans_roll_inode(&sc->tp, sc->ip); 780 return xfs_repair_roll_ag_trans(sc); 781 782 out_cur: 783 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 784 if (agf_bp != sc->sa.agf_bp) 785 xfs_trans_brelse(sc->tp, agf_bp); 786 return error; 787 } 788 789 /* Dispose of btree blocks from an old per-AG btree. */ 790 int 791 xfs_repair_reap_btree_extents( 792 struct xfs_scrub_context *sc, 793 struct xfs_repair_extent_list *exlist, 794 struct xfs_owner_info *oinfo, 795 enum xfs_ag_resv_type type) 796 { 797 struct xfs_repair_extent *rex; 798 struct xfs_repair_extent *n; 799 int error = 0; 800 801 ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb)); 802 803 /* Dispose of every block from the old btree. */ 804 for_each_xfs_repair_extent_safe(rex, n, exlist) { 805 ASSERT(sc->ip != NULL || 806 XFS_FSB_TO_AGNO(sc->mp, rex->fsbno) == sc->sa.agno); 807 808 trace_xfs_repair_dispose_btree_extent(sc->mp, 809 XFS_FSB_TO_AGNO(sc->mp, rex->fsbno), 810 XFS_FSB_TO_AGBNO(sc->mp, rex->fsbno), rex->len); 811 812 for (; rex->len > 0; rex->len--, rex->fsbno++) { 813 error = xfs_repair_dispose_btree_block(sc, rex->fsbno, 814 oinfo, type); 815 if (error) 816 goto out; 817 } 818 list_del(&rex->list); 819 kmem_free(rex); 820 } 821 822 out: 823 xfs_repair_cancel_btree_extents(sc, exlist); 824 return error; 825 } 826 827 /* 828 * Finding per-AG Btree Roots for AGF/AGI Reconstruction 829 * 830 * If the AGF or AGI become slightly corrupted, it may be necessary to rebuild 831 * the AG headers by using the rmap data to rummage through the AG looking for 832 * btree roots. This is not guaranteed to work if the AG is heavily damaged 833 * or the rmap data are corrupt. 834 * 835 * Callers of xfs_repair_find_ag_btree_roots must lock the AGF and AGFL 836 * buffers if the AGF is being rebuilt; or the AGF and AGI buffers if the 837 * AGI is being rebuilt. It must maintain these locks until it's safe for 838 * other threads to change the btrees' shapes. The caller provides 839 * information about the btrees to look for by passing in an array of 840 * xfs_repair_find_ag_btree with the (rmap owner, buf_ops, magic) fields set. 841 * The (root, height) fields will be set on return if anything is found. The 842 * last element of the array should have a NULL buf_ops to mark the end of the 843 * array. 844 * 845 * For every rmapbt record matching any of the rmap owners in btree_info, 846 * read each block referenced by the rmap record. If the block is a btree 847 * block from this filesystem matching any of the magic numbers and has a 848 * level higher than what we've already seen, remember the block and the 849 * height of the tree required to have such a block. When the call completes, 850 * we return the highest block we've found for each btree description; those 851 * should be the roots. 852 */ 853 854 struct xfs_repair_findroot { 855 struct xfs_scrub_context *sc; 856 struct xfs_buf *agfl_bp; 857 struct xfs_agf *agf; 858 struct xfs_repair_find_ag_btree *btree_info; 859 }; 860 861 /* See if our block is in the AGFL. */ 862 STATIC int 863 xfs_repair_findroot_agfl_walk( 864 struct xfs_mount *mp, 865 xfs_agblock_t bno, 866 void *priv) 867 { 868 xfs_agblock_t *agbno = priv; 869 870 return (*agbno == bno) ? XFS_BTREE_QUERY_RANGE_ABORT : 0; 871 } 872 873 /* Does this block match the btree information passed in? */ 874 STATIC int 875 xfs_repair_findroot_block( 876 struct xfs_repair_findroot *ri, 877 struct xfs_repair_find_ag_btree *fab, 878 uint64_t owner, 879 xfs_agblock_t agbno, 880 bool *found_it) 881 { 882 struct xfs_mount *mp = ri->sc->mp; 883 struct xfs_buf *bp; 884 struct xfs_btree_block *btblock; 885 xfs_daddr_t daddr; 886 int error; 887 888 daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.agno, agbno); 889 890 /* 891 * Blocks in the AGFL have stale contents that might just happen to 892 * have a matching magic and uuid. We don't want to pull these blocks 893 * in as part of a tree root, so we have to filter out the AGFL stuff 894 * here. If the AGFL looks insane we'll just refuse to repair. 895 */ 896 if (owner == XFS_RMAP_OWN_AG) { 897 error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp, 898 xfs_repair_findroot_agfl_walk, &agbno); 899 if (error == XFS_BTREE_QUERY_RANGE_ABORT) 900 return 0; 901 if (error) 902 return error; 903 } 904 905 error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr, 906 mp->m_bsize, 0, &bp, NULL); 907 if (error) 908 return error; 909 910 /* 911 * Does this look like a block matching our fs and higher than any 912 * other block we've found so far? If so, reattach buffer verifiers 913 * so the AIL won't complain if the buffer is also dirty. 914 */ 915 btblock = XFS_BUF_TO_BLOCK(bp); 916 if (be32_to_cpu(btblock->bb_magic) != fab->magic) 917 goto out; 918 if (xfs_sb_version_hascrc(&mp->m_sb) && 919 !uuid_equal(&btblock->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid)) 920 goto out; 921 bp->b_ops = fab->buf_ops; 922 923 /* Ignore this block if it's lower in the tree than we've seen. */ 924 if (fab->root != NULLAGBLOCK && 925 xfs_btree_get_level(btblock) < fab->height) 926 goto out; 927 928 /* Make sure we pass the verifiers. */ 929 bp->b_ops->verify_read(bp); 930 if (bp->b_error) 931 goto out; 932 fab->root = agbno; 933 fab->height = xfs_btree_get_level(btblock) + 1; 934 *found_it = true; 935 936 trace_xfs_repair_findroot_block(mp, ri->sc->sa.agno, agbno, 937 be32_to_cpu(btblock->bb_magic), fab->height - 1); 938 out: 939 xfs_trans_brelse(ri->sc->tp, bp); 940 return error; 941 } 942 943 /* 944 * Do any of the blocks in this rmap record match one of the btrees we're 945 * looking for? 946 */ 947 STATIC int 948 xfs_repair_findroot_rmap( 949 struct xfs_btree_cur *cur, 950 struct xfs_rmap_irec *rec, 951 void *priv) 952 { 953 struct xfs_repair_findroot *ri = priv; 954 struct xfs_repair_find_ag_btree *fab; 955 xfs_agblock_t b; 956 bool found_it; 957 int error = 0; 958 959 /* Ignore anything that isn't AG metadata. */ 960 if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner)) 961 return 0; 962 963 /* Otherwise scan each block + btree type. */ 964 for (b = 0; b < rec->rm_blockcount; b++) { 965 found_it = false; 966 for (fab = ri->btree_info; fab->buf_ops; fab++) { 967 if (rec->rm_owner != fab->rmap_owner) 968 continue; 969 error = xfs_repair_findroot_block(ri, fab, 970 rec->rm_owner, rec->rm_startblock + b, 971 &found_it); 972 if (error) 973 return error; 974 if (found_it) 975 break; 976 } 977 } 978 979 return 0; 980 } 981 982 /* Find the roots of the per-AG btrees described in btree_info. */ 983 int 984 xfs_repair_find_ag_btree_roots( 985 struct xfs_scrub_context *sc, 986 struct xfs_buf *agf_bp, 987 struct xfs_repair_find_ag_btree *btree_info, 988 struct xfs_buf *agfl_bp) 989 { 990 struct xfs_mount *mp = sc->mp; 991 struct xfs_repair_findroot ri; 992 struct xfs_repair_find_ag_btree *fab; 993 struct xfs_btree_cur *cur; 994 int error; 995 996 ASSERT(xfs_buf_islocked(agf_bp)); 997 ASSERT(agfl_bp == NULL || xfs_buf_islocked(agfl_bp)); 998 999 ri.sc = sc; 1000 ri.btree_info = btree_info; 1001 ri.agf = XFS_BUF_TO_AGF(agf_bp); 1002 ri.agfl_bp = agfl_bp; 1003 for (fab = btree_info; fab->buf_ops; fab++) { 1004 ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG); 1005 ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner)); 1006 fab->root = NULLAGBLOCK; 1007 fab->height = 0; 1008 } 1009 1010 cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno); 1011 error = xfs_rmap_query_all(cur, xfs_repair_findroot_rmap, &ri); 1012 xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); 1013 1014 return error; 1015 } 1016 1017 /* Force a quotacheck the next time we mount. */ 1018 void 1019 xfs_repair_force_quotacheck( 1020 struct xfs_scrub_context *sc, 1021 uint dqtype) 1022 { 1023 uint flag; 1024 1025 flag = xfs_quota_chkd_flag(dqtype); 1026 if (!(flag & sc->mp->m_qflags)) 1027 return; 1028 1029 sc->mp->m_qflags &= ~flag; 1030 spin_lock(&sc->mp->m_sb_lock); 1031 sc->mp->m_sb.sb_qflags &= ~flag; 1032 spin_unlock(&sc->mp->m_sb_lock); 1033 xfs_log_sb(sc->tp); 1034 } 1035 1036 /* 1037 * Attach dquots to this inode, or schedule quotacheck to fix them. 1038 * 1039 * This function ensures that the appropriate dquots are attached to an inode. 1040 * We cannot allow the dquot code to allocate an on-disk dquot block here 1041 * because we're already in transaction context with the inode locked. The 1042 * on-disk dquot should already exist anyway. If the quota code signals 1043 * corruption or missing quota information, schedule quotacheck, which will 1044 * repair corruptions in the quota metadata. 1045 */ 1046 int 1047 xfs_repair_ino_dqattach( 1048 struct xfs_scrub_context *sc) 1049 { 1050 int error; 1051 1052 error = xfs_qm_dqattach_locked(sc->ip, false); 1053 switch (error) { 1054 case -EFSBADCRC: 1055 case -EFSCORRUPTED: 1056 case -ENOENT: 1057 xfs_err_ratelimited(sc->mp, 1058 "inode %llu repair encountered quota error %d, quotacheck forced.", 1059 (unsigned long long)sc->ip->i_ino, error); 1060 if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot) 1061 xfs_repair_force_quotacheck(sc, XFS_DQ_USER); 1062 if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot) 1063 xfs_repair_force_quotacheck(sc, XFS_DQ_GROUP); 1064 if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot) 1065 xfs_repair_force_quotacheck(sc, XFS_DQ_PROJ); 1066 /* fall through */ 1067 case -ESRCH: 1068 error = 0; 1069 break; 1070 default: 1071 break; 1072 } 1073 1074 return error; 1075 } 1076