// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"

/* Superblock */

/* Repair the superblock. */
int
xrep_superblock(
        struct xfs_scrub        *sc)
{
        struct xfs_mount        *mp = sc->mp;
        struct xfs_buf          *bp;
        xfs_agnumber_t          agno;
        int                     error;

        /* Don't try to repair AG 0's sb; let xfs_repair deal with it. */
        agno = sc->sm->sm_agno;
        if (agno == 0)
                return -EOPNOTSUPP;

        error = xfs_sb_get_secondary(mp, sc->tp, agno, &bp);
        if (error)
                return error;

        /* Copy AG 0's superblock to this one. */
        xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
        xfs_sb_to_disk(bp->b_addr, &mp->m_sb);

        /*
         * Don't write out a secondary super with NEEDSREPAIR or log incompat
         * features set, since both are ignored when set on a secondary.
         */
        if (xfs_has_crc(mp)) {
                struct xfs_dsb          *sb = bp->b_addr;

                sb->sb_features_incompat &=
                                ~cpu_to_be32(XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR);
                sb->sb_features_log_incompat = 0;
        }

        /* Write this to disk. */
        xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF);
        xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1);
        return error;
}

/* AGF */

struct xrep_agf_allocbt {
        struct xfs_scrub        *sc;
        xfs_agblock_t           freeblks;
        xfs_agblock_t           longest;
};

/* Record free space shape information. */
STATIC int
xrep_agf_walk_allocbt(
        struct xfs_btree_cur            *cur,
        const struct xfs_alloc_rec_incore *rec,
        void                            *priv)
{
        struct xrep_agf_allocbt         *raa = priv;
        int                             error = 0;

        if (xchk_should_terminate(raa->sc, &error))
                return error;

        raa->freeblks += rec->ar_blockcount;
        if (rec->ar_blockcount > raa->longest)
                raa->longest = rec->ar_blockcount;
        return error;
}

/* Does this AGFL block look sane? */
STATIC int
xrep_agf_check_agfl_block(
        struct xfs_mount        *mp,
        xfs_agblock_t           agbno,
        void                    *priv)
{
        struct xfs_scrub        *sc = priv;

        if (!xfs_verify_agbno(sc->sa.pag, agbno))
                return -EFSCORRUPTED;
        return 0;
}

/*
 * Offset within the xrep_find_ag_btree array for each btree type.  Avoid the
 * XFS_BTNUM_ names here to avoid creating a sparse array.
 */
enum {
        XREP_AGF_BNOBT = 0,
        XREP_AGF_CNTBT,
        XREP_AGF_RMAPBT,
        XREP_AGF_REFCOUNTBT,
        XREP_AGF_END,
        XREP_AGF_MAX
};

/* Check a btree root candidate. */
static inline bool
xrep_check_btree_root(
        struct xfs_scrub                *sc,
        struct xrep_find_ag_btree       *fab)
{
        return xfs_verify_agbno(sc->sa.pag, fab->root) &&
               fab->height <= fab->maxlevels;
}
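
/*
 * Note: each xrep_find_ag_btree entry filled out below pairs an rmap owner
 * code with a buffer verifier, which the root-finding code can use to
 * recognize candidate btree blocks, and a maximum legal height, which
 * xrep_check_btree_root() above compares against whatever gets found.
 */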

/*
 * Given the btree roots described by *fab, find the roots, check them for
 * sanity, and pass the root data back out via *fab.
 *
 * This is /also/ a chicken and egg problem because we have to use the rmapbt
 * (rooted in the AGF) to find the btrees rooted in the AGF.  We also have no
 * idea if the btrees make any sense.  If we hit obvious corruptions in those
 * btrees we'll bail out.
 */
STATIC int
xrep_agf_find_btrees(
        struct xfs_scrub                *sc,
        struct xfs_buf                  *agf_bp,
        struct xrep_find_ag_btree       *fab,
        struct xfs_buf                  *agfl_bp)
{
        struct xfs_agf                  *old_agf = agf_bp->b_addr;
        int                             error;

        /* Go find the root data. */
        error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
        if (error)
                return error;

        /* We must find the bnobt, cntbt, and rmapbt roots. */
        if (!xrep_check_btree_root(sc, &fab[XREP_AGF_BNOBT]) ||
            !xrep_check_btree_root(sc, &fab[XREP_AGF_CNTBT]) ||
            !xrep_check_btree_root(sc, &fab[XREP_AGF_RMAPBT]))
                return -EFSCORRUPTED;

        /*
         * We relied on the rmapbt to reconstruct the AGF.  If we get a
         * different root then something's seriously wrong.
         */
        if (fab[XREP_AGF_RMAPBT].root !=
            be32_to_cpu(old_agf->agf_roots[XFS_BTNUM_RMAPi]))
                return -EFSCORRUPTED;

        /* We must find the refcountbt root if that feature is enabled. */
        if (xfs_has_reflink(sc->mp) &&
            !xrep_check_btree_root(sc, &fab[XREP_AGF_REFCOUNTBT]))
                return -EFSCORRUPTED;

        return 0;
}

/*
 * Reinitialize the AGF header, making an in-core copy of the old contents so
 * that we know which in-core state needs to be reinitialized.
 */
STATIC void
xrep_agf_init_header(
        struct xfs_scrub        *sc,
        struct xfs_buf          *agf_bp,
        struct xfs_agf          *old_agf)
{
        struct xfs_mount        *mp = sc->mp;
        struct xfs_perag        *pag = sc->sa.pag;
        struct xfs_agf          *agf = agf_bp->b_addr;

        memcpy(old_agf, agf, sizeof(*old_agf));
        memset(agf, 0, BBTOB(agf_bp->b_length));
        agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
        agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
        agf->agf_seqno = cpu_to_be32(pag->pag_agno);
        agf->agf_length = cpu_to_be32(pag->block_count);
        agf->agf_flfirst = old_agf->agf_flfirst;
        agf->agf_fllast = old_agf->agf_fllast;
        agf->agf_flcount = old_agf->agf_flcount;
        if (xfs_has_crc(mp))
                uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);

        /* Mark the incore AGF data stale until we're done fixing things. */
        ASSERT(xfs_perag_initialised_agf(pag));
        clear_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
}

/* Set btree root information in an AGF. */
STATIC void
xrep_agf_set_roots(
        struct xfs_scrub                *sc,
        struct xfs_agf                  *agf,
        struct xrep_find_ag_btree       *fab)
{
        agf->agf_roots[XFS_BTNUM_BNOi] =
                        cpu_to_be32(fab[XREP_AGF_BNOBT].root);
        agf->agf_levels[XFS_BTNUM_BNOi] =
                        cpu_to_be32(fab[XREP_AGF_BNOBT].height);

        agf->agf_roots[XFS_BTNUM_CNTi] =
                        cpu_to_be32(fab[XREP_AGF_CNTBT].root);
        agf->agf_levels[XFS_BTNUM_CNTi] =
                        cpu_to_be32(fab[XREP_AGF_CNTBT].height);

        agf->agf_roots[XFS_BTNUM_RMAPi] =
                        cpu_to_be32(fab[XREP_AGF_RMAPBT].root);
        agf->agf_levels[XFS_BTNUM_RMAPi] =
                        cpu_to_be32(fab[XREP_AGF_RMAPBT].height);

        if (xfs_has_reflink(sc->mp)) {
                agf->agf_refcount_root =
                                cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].root);
                agf->agf_refcount_level =
                                cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].height);
        }
}
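
/*
 * A worked example of the counter rebuild below, assuming a bnobt of 3
 * blocks, a cntbt of 3 blocks, and an rmapbt of 5 blocks: each btree
 * contributes its block count minus one to agf_btreeblks, giving
 * (3 - 1) + (3 - 1) + (5 - 1) = 9, while agf_rmap_blocks records the full
 * rmapbt size of 5.
 */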

/* Update all AGF fields which derive from btree contents. */
STATIC int
xrep_agf_calc_from_btrees(
        struct xfs_scrub        *sc,
        struct xfs_buf          *agf_bp)
{
        struct xrep_agf_allocbt raa = { .sc = sc };
        struct xfs_btree_cur    *cur = NULL;
        struct xfs_agf          *agf = agf_bp->b_addr;
        struct xfs_mount        *mp = sc->mp;
        xfs_agblock_t           btreeblks;
        xfs_agblock_t           blocks;
        int                     error;

        /* Update the AGF counters from the bnobt. */
        cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
                        sc->sa.pag, XFS_BTNUM_BNO);
        error = xfs_alloc_query_all(cur, xrep_agf_walk_allocbt, &raa);
        if (error)
                goto err;
        error = xfs_btree_count_blocks(cur, &blocks);
        if (error)
                goto err;
        xfs_btree_del_cursor(cur, error);
        btreeblks = blocks - 1;
        agf->agf_freeblks = cpu_to_be32(raa.freeblks);
        agf->agf_longest = cpu_to_be32(raa.longest);

        /* Update the AGF counters from the cntbt. */
        cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
                        sc->sa.pag, XFS_BTNUM_CNT);
        error = xfs_btree_count_blocks(cur, &blocks);
        if (error)
                goto err;
        xfs_btree_del_cursor(cur, error);
        btreeblks += blocks - 1;

        /* Update the AGF counters from the rmapbt. */
        cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
        error = xfs_btree_count_blocks(cur, &blocks);
        if (error)
                goto err;
        xfs_btree_del_cursor(cur, error);
        agf->agf_rmap_blocks = cpu_to_be32(blocks);
        btreeblks += blocks - 1;

        agf->agf_btreeblks = cpu_to_be32(btreeblks);

        /* Update the AGF counters from the refcountbt. */
        if (xfs_has_reflink(mp)) {
                cur = xfs_refcountbt_init_cursor(mp, sc->tp, agf_bp,
                                sc->sa.pag);
                error = xfs_btree_count_blocks(cur, &blocks);
                if (error)
                        goto err;
                xfs_btree_del_cursor(cur, error);
                agf->agf_refcount_blocks = cpu_to_be32(blocks);
        }

        return 0;
err:
        xfs_btree_del_cursor(cur, error);
        return error;
}

/* Commit the new AGF and reinitialize the incore state. */
STATIC int
xrep_agf_commit_new(
        struct xfs_scrub        *sc,
        struct xfs_buf          *agf_bp)
{
        struct xfs_perag        *pag;
        struct xfs_agf          *agf = agf_bp->b_addr;

        /* Trigger fdblocks recalculation */
        xfs_force_summary_recalc(sc->mp);

        /* Write this to disk. */
        xfs_trans_buf_set_type(sc->tp, agf_bp, XFS_BLFT_AGF_BUF);
        xfs_trans_log_buf(sc->tp, agf_bp, 0, BBTOB(agf_bp->b_length) - 1);

        /* Now reinitialize the in-core counters we changed. */
        pag = sc->sa.pag;
        pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
        pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
        pag->pagf_longest = be32_to_cpu(agf->agf_longest);
        pag->pagf_levels[XFS_BTNUM_BNOi] =
                        be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
        pag->pagf_levels[XFS_BTNUM_CNTi] =
                        be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
        pag->pagf_levels[XFS_BTNUM_RMAPi] =
                        be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
        pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
        set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);

        return 0;
}
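
/*
 * In rough outline, the AGF repair below proceeds in four steps: use the
 * rmapbt to locate the free space and refcount btree roots, reset the
 * on-disk AGF header, implant the roots that were found, and recompute the
 * derived counters from the btrees themselves.  If a step after the header
 * reset fails, the old AGF contents are copied back and the incore AGF state
 * is left marked stale.
 */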

/* Repair the AGF.  v5 filesystems only. */
int
xrep_agf(
        struct xfs_scrub                *sc)
{
        struct xrep_find_ag_btree       fab[XREP_AGF_MAX] = {
                [XREP_AGF_BNOBT] = {
                        .rmap_owner = XFS_RMAP_OWN_AG,
                        .buf_ops = &xfs_bnobt_buf_ops,
                        .maxlevels = sc->mp->m_alloc_maxlevels,
                },
                [XREP_AGF_CNTBT] = {
                        .rmap_owner = XFS_RMAP_OWN_AG,
                        .buf_ops = &xfs_cntbt_buf_ops,
                        .maxlevels = sc->mp->m_alloc_maxlevels,
                },
                [XREP_AGF_RMAPBT] = {
                        .rmap_owner = XFS_RMAP_OWN_AG,
                        .buf_ops = &xfs_rmapbt_buf_ops,
                        .maxlevels = sc->mp->m_rmap_maxlevels,
                },
                [XREP_AGF_REFCOUNTBT] = {
                        .rmap_owner = XFS_RMAP_OWN_REFC,
                        .buf_ops = &xfs_refcountbt_buf_ops,
                        .maxlevels = sc->mp->m_refc_maxlevels,
                },
                [XREP_AGF_END] = {
                        .buf_ops = NULL,
                },
        };
        struct xfs_agf                  old_agf;
        struct xfs_mount                *mp = sc->mp;
        struct xfs_buf                  *agf_bp;
        struct xfs_buf                  *agfl_bp;
        struct xfs_agf                  *agf;
        int                             error;

        /* We require the rmapbt to rebuild anything. */
        if (!xfs_has_rmapbt(mp))
                return -EOPNOTSUPP;

        /*
         * Make sure we have the AGF buffer, as scrub might have decided it
         * was corrupt after xfs_alloc_read_agf failed with -EFSCORRUPTED.
         */
        error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
                        XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
                                                XFS_AGF_DADDR(mp)),
                        XFS_FSS_TO_BB(mp, 1), 0, &agf_bp, NULL);
        if (error)
                return error;
        agf_bp->b_ops = &xfs_agf_buf_ops;
        agf = agf_bp->b_addr;

        /*
         * Load the AGFL so that we can screen out OWN_AG blocks that are on
         * the AGFL now; these blocks might have once been part of the
         * bno/cnt/rmap btrees but are not now.  This is a chicken and egg
         * problem: the AGF is corrupt, so we have to trust the AGFL contents
         * because we can't do any serious cross-referencing with any of the
         * btrees rooted in the AGF.  If the AGFL contents are obviously bad
         * then we'll bail out.
         */
        error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
        if (error)
                return error;

        /*
         * Spot-check the AGFL blocks; if they're obviously corrupt then
         * there's nothing we can do but bail out.
         */
        error = xfs_agfl_walk(sc->mp, agf_bp->b_addr, agfl_bp,
                        xrep_agf_check_agfl_block, sc);
        if (error)
                return error;

        /*
         * Find the AGF btree roots.  This is also a chicken-and-egg
         * situation; see the function for more details.
         */
        error = xrep_agf_find_btrees(sc, agf_bp, fab, agfl_bp);
        if (error)
                return error;

        /* Start rewriting the header and implant the btrees we found. */
        xrep_agf_init_header(sc, agf_bp, &old_agf);
        xrep_agf_set_roots(sc, agf, fab);
        error = xrep_agf_calc_from_btrees(sc, agf_bp);
        if (error)
                goto out_revert;

        /* Commit the changes and reinitialize incore state. */
        return xrep_agf_commit_new(sc, agf_bp);

out_revert:
        /* Mark the incore AGF state stale and revert the AGF. */
        clear_bit(XFS_AGSTATE_AGF_INIT, &sc->sa.pag->pag_opstate);
        memcpy(agf, &old_agf, sizeof(old_agf));
        return error;
}
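
/*
 * Illustrative only: userspace normally reaches xrep_agf() through the
 * XFS_IOC_SCRUB_METADATA ioctl with the repair flag set, roughly like so
 * (error handling omitted):
 *
 *      struct xfs_scrub_metadata sm = {
 *              .sm_type  = XFS_SCRUB_TYPE_AGF,
 *              .sm_agno  = agno,
 *              .sm_flags = XFS_SCRUB_IFLAG_REPAIR,
 *      };
 *      ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm);
 */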

/* AGFL */

struct xrep_agfl {
        /* Bitmap of alleged AGFL blocks that we're not going to add. */
        struct xbitmap          crossed;

        /* Bitmap of other OWN_AG metadata blocks. */
        struct xbitmap          agmetablocks;

        /* Bitmap of free space. */
        struct xbitmap          *freesp;

        /* rmapbt cursor for finding crosslinked blocks */
        struct xfs_btree_cur    *rmap_cur;

        struct xfs_scrub        *sc;
};

/* Record all OWN_AG (free space btree) information from the rmap data. */
STATIC int
xrep_agfl_walk_rmap(
        struct xfs_btree_cur    *cur,
        const struct xfs_rmap_irec *rec,
        void                    *priv)
{
        struct xrep_agfl        *ra = priv;
        xfs_fsblock_t           fsb;
        int                     error = 0;

        if (xchk_should_terminate(ra->sc, &error))
                return error;

        /* Record all the OWN_AG blocks. */
        if (rec->rm_owner == XFS_RMAP_OWN_AG) {
                fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno,
                                rec->rm_startblock);
                error = xbitmap_set(ra->freesp, fsb, rec->rm_blockcount);
                if (error)
                        return error;
        }

        return xbitmap_set_btcur_path(&ra->agmetablocks, cur);
}

/* Strike out the blocks that are cross-linked according to the rmapbt. */
STATIC int
xrep_agfl_check_extent(
        struct xrep_agfl        *ra,
        uint64_t                start,
        uint64_t                len)
{
        xfs_agblock_t           agbno = XFS_FSB_TO_AGBNO(ra->sc->mp, start);
        xfs_agblock_t           last_agbno = agbno + len - 1;
        int                     error;

        ASSERT(XFS_FSB_TO_AGNO(ra->sc->mp, start) == ra->sc->sa.pag->pag_agno);

        while (agbno <= last_agbno) {
                bool            other_owners;

                error = xfs_rmap_has_other_keys(ra->rmap_cur, agbno, 1,
                                &XFS_RMAP_OINFO_AG, &other_owners);
                if (error)
                        return error;

                if (other_owners) {
                        error = xbitmap_set(&ra->crossed, agbno, 1);
                        if (error)
                                return error;
                }

                if (xchk_should_terminate(ra->sc, &error))
                        return error;
                agbno++;
        }

        return 0;
}

/*
 * Map out all the non-AGFL OWN_AG space in this AG so that we can deduce
 * which blocks belong to the AGFL.
 *
 * Compute the set of old AGFL blocks by subtracting from the list of OWN_AG
 * blocks the list of blocks owned by all other OWN_AG metadata (bnobt, cntbt,
 * rmapbt).  These are the old AGFL blocks, so return that list and the number
 * of blocks we're actually going to put back on the AGFL.
 */
STATIC int
xrep_agfl_collect_blocks(
        struct xfs_scrub        *sc,
        struct xfs_buf          *agf_bp,
        struct xbitmap          *agfl_extents,
        xfs_agblock_t           *flcount)
{
        struct xrep_agfl        ra;
        struct xfs_mount        *mp = sc->mp;
        struct xfs_btree_cur    *cur;
        struct xbitmap_range    *br, *n;
        int                     error;

        ra.sc = sc;
        ra.freesp = agfl_extents;
        xbitmap_init(&ra.agmetablocks);
        xbitmap_init(&ra.crossed);

        /* Find all space used by the free space btrees & rmapbt. */
        cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
        error = xfs_rmap_query_all(cur, xrep_agfl_walk_rmap, &ra);
        xfs_btree_del_cursor(cur, error);
        if (error)
                goto out_bmp;

        /* Find all blocks currently being used by the bnobt. */
        cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
                        sc->sa.pag, XFS_BTNUM_BNO);
        error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
        xfs_btree_del_cursor(cur, error);
        if (error)
                goto out_bmp;

        /* Find all blocks currently being used by the cntbt. */
        cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
                        sc->sa.pag, XFS_BTNUM_CNT);
        error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
        xfs_btree_del_cursor(cur, error);
        if (error)
                goto out_bmp;

        /*
         * Drop the freesp meta blocks that are in use by btrees.
         * The remaining blocks /should/ be AGFL blocks.
         */
        error = xbitmap_disunion(agfl_extents, &ra.agmetablocks);
        if (error)
                goto out_bmp;

        /* Strike out the blocks that are cross-linked. */
        ra.rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
        for_each_xbitmap_extent(br, n, agfl_extents) {
                error = xrep_agfl_check_extent(&ra, br->start, br->len);
                if (error)
                        break;
        }
        xfs_btree_del_cursor(ra.rmap_cur, error);
        if (error)
                goto out_bmp;
        error = xbitmap_disunion(agfl_extents, &ra.crossed);
        if (error)
                goto out_bmp;

        /*
         * Calculate the new AGFL size.  If we found more blocks than fit in
         * the AGFL we'll free them later.
         */
        *flcount = min_t(uint64_t, xbitmap_hweight(agfl_extents),
                         xfs_agfl_size(mp));

out_bmp:
        xbitmap_destroy(&ra.crossed);
        xbitmap_destroy(&ra.agmetablocks);
        return error;
}

/* Update the AGF and reset the in-core state. */
STATIC void
xrep_agfl_update_agf(
        struct xfs_scrub        *sc,
        struct xfs_buf          *agf_bp,
        xfs_agblock_t           flcount)
{
        struct xfs_agf          *agf = agf_bp->b_addr;

        ASSERT(flcount <= xfs_agfl_size(sc->mp));

        /* Trigger fdblocks recalculation */
        xfs_force_summary_recalc(sc->mp);

        /* Update the AGF counters. */
        if (xfs_perag_initialised_agf(sc->sa.pag))
                sc->sa.pag->pagf_flcount = flcount;
        agf->agf_flfirst = cpu_to_be32(0);
        agf->agf_flcount = cpu_to_be32(flcount);
        agf->agf_fllast = cpu_to_be32(flcount - 1);

        xfs_alloc_log_agf(sc->tp, agf_bp,
                        XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
}

/* Write out a totally new AGFL. */
STATIC void
xrep_agfl_init_header(
        struct xfs_scrub        *sc,
        struct xfs_buf          *agfl_bp,
        struct xbitmap          *agfl_extents,
        xfs_agblock_t           flcount)
{
        struct xfs_mount        *mp = sc->mp;
        __be32                  *agfl_bno;
        struct xbitmap_range    *br;
        struct xbitmap_range    *n;
        struct xfs_agfl         *agfl;
        xfs_agblock_t           agbno;
        unsigned int            fl_off;

        ASSERT(flcount <= xfs_agfl_size(mp));

        /*
         * Start rewriting the header by setting the bno[] array to
         * NULLAGBLOCK, then setting AGFL header fields.
         */
        agfl = XFS_BUF_TO_AGFL(agfl_bp);
        memset(agfl, 0xFF, BBTOB(agfl_bp->b_length));
        agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
        agfl->agfl_seqno = cpu_to_be32(sc->sa.pag->pag_agno);
        uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);

        /*
         * Fill the AGFL with the remaining blocks.  If agfl_extents has more
         * blocks than fit in the AGFL, they will be freed in a subsequent
         * step.
         */
        fl_off = 0;
        agfl_bno = xfs_buf_to_agfl_bno(agfl_bp);
        for_each_xbitmap_extent(br, n, agfl_extents) {
                agbno = XFS_FSB_TO_AGBNO(mp, br->start);

                trace_xrep_agfl_insert(mp, sc->sa.pag->pag_agno, agbno,
                                br->len);

                while (br->len > 0 && fl_off < flcount) {
                        agfl_bno[fl_off] = cpu_to_be32(agbno);
                        fl_off++;
                        agbno++;

                        /*
                         * We've now used br->start by putting it in the AGFL,
                         * so bump br so that we don't reap the block later.
                         */
                        br->start++;
                        br->len--;
                }

                if (br->len)
                        break;
                list_del(&br->list);
                kfree(br);
        }

        /* Write new AGFL to disk. */
        xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF);
        xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1);
}
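
/*
 * To sketch the AGFL rebuild below with made-up numbers: if the rmapbt says
 * OWN_AG covers 40 blocks and the bno/cnt/rmap btrees account for 31 of
 * them, the 9 blocks left over after the two disunion passes above are taken
 * to be the old AGFL blocks; up to xfs_agfl_size() of them go back on the
 * free list and any excess is freed afterwards.
 */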

/* Repair the AGFL. */
int
xrep_agfl(
        struct xfs_scrub        *sc)
{
        struct xbitmap          agfl_extents;
        struct xfs_mount        *mp = sc->mp;
        struct xfs_buf          *agf_bp;
        struct xfs_buf          *agfl_bp;
        xfs_agblock_t           flcount;
        int                     error;

        /* We require the rmapbt to rebuild anything. */
        if (!xfs_has_rmapbt(mp))
                return -EOPNOTSUPP;

        xbitmap_init(&agfl_extents);

        /*
         * Read the AGF so that we can query the rmapbt.  We hope that there's
         * nothing wrong with the AGF, but all the AG header repair functions
         * have this chicken-and-egg problem.
         */
        error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp);
        if (error)
                return error;

        /*
         * Make sure we have the AGFL buffer, as scrub might have decided it
         * was corrupt after xfs_alloc_read_agfl failed with -EFSCORRUPTED.
         */
        error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
                        XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
                                                XFS_AGFL_DADDR(mp)),
                        XFS_FSS_TO_BB(mp, 1), 0, &agfl_bp, NULL);
        if (error)
                return error;
        agfl_bp->b_ops = &xfs_agfl_buf_ops;

        /* Gather all the extents we're going to put on the new AGFL. */
        error = xrep_agfl_collect_blocks(sc, agf_bp, &agfl_extents, &flcount);
        if (error)
                goto err;

        /*
         * Update AGF and AGFL.  We reset the global free block counter when
         * we adjust the AGF flcount (which can fail) so avoid updating any
         * buffers until we know that part works.
         */
        xrep_agfl_update_agf(sc, agf_bp, flcount);
        xrep_agfl_init_header(sc, agfl_bp, &agfl_extents, flcount);

        /*
         * Ok, the AGFL should be ready to go now.  Roll the transaction to
         * make the new AGFL permanent before we start using it to return
         * freespace overflow to the freespace btrees.
         */
        sc->sa.agf_bp = agf_bp;
        error = xrep_roll_ag_trans(sc);
        if (error)
                goto err;

        /* Dump any AGFL overflow. */
        error = xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG,
                        XFS_AG_RESV_AGFL);
err:
        xbitmap_destroy(&agfl_extents);
        return error;
}

/* AGI */

/*
 * Offset within the xrep_find_ag_btree array for each btree type.  Avoid the
 * XFS_BTNUM_ names here to avoid creating a sparse array.
 */
enum {
        XREP_AGI_INOBT = 0,
        XREP_AGI_FINOBT,
        XREP_AGI_END,
        XREP_AGI_MAX
};

/*
 * Given the inode btree roots described by *fab, find the roots, check them
 * for sanity, and pass the root data back out via *fab.
 */
STATIC int
xrep_agi_find_btrees(
        struct xfs_scrub                *sc,
        struct xrep_find_ag_btree       *fab)
{
        struct xfs_buf                  *agf_bp;
        struct xfs_mount                *mp = sc->mp;
        int                             error;

        /* Read the AGF. */
        error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp);
        if (error)
                return error;

        /* Find the btree roots. */
        error = xrep_find_ag_btree_roots(sc, agf_bp, fab, NULL);
        if (error)
                return error;

        /* We must find the inobt root. */
        if (!xrep_check_btree_root(sc, &fab[XREP_AGI_INOBT]))
                return -EFSCORRUPTED;

        /* We must find the finobt root if that feature is enabled. */
        if (xfs_has_finobt(mp) &&
            !xrep_check_btree_root(sc, &fab[XREP_AGI_FINOBT]))
                return -EFSCORRUPTED;

        return 0;
}
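
/*
 * Note that the rmapbt records both inode btrees under XFS_RMAP_OWN_INOBT,
 * so the root search above has to tell the inobt and finobt apart by their
 * buffer verifiers (block magic numbers) rather than by rmap owner.
 */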

/*
 * Reinitialize the AGI header, making an in-core copy of the old contents so
 * that we know which in-core state needs to be reinitialized.
 */
STATIC void
xrep_agi_init_header(
        struct xfs_scrub        *sc,
        struct xfs_buf          *agi_bp,
        struct xfs_agi          *old_agi)
{
        struct xfs_agi          *agi = agi_bp->b_addr;
        struct xfs_perag        *pag = sc->sa.pag;
        struct xfs_mount        *mp = sc->mp;

        memcpy(old_agi, agi, sizeof(*old_agi));
        memset(agi, 0, BBTOB(agi_bp->b_length));
        agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
        agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
        agi->agi_seqno = cpu_to_be32(pag->pag_agno);
        agi->agi_length = cpu_to_be32(pag->block_count);
        agi->agi_newino = cpu_to_be32(NULLAGINO);
        agi->agi_dirino = cpu_to_be32(NULLAGINO);
        if (xfs_has_crc(mp))
                uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);

        /* We don't know how to fix the unlinked list yet. */
        memcpy(&agi->agi_unlinked, &old_agi->agi_unlinked,
                        sizeof(agi->agi_unlinked));

        /* Mark the incore AGI data stale until we're done fixing things. */
        ASSERT(xfs_perag_initialised_agi(pag));
        clear_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);
}

/* Set btree root information in an AGI. */
STATIC void
xrep_agi_set_roots(
        struct xfs_scrub                *sc,
        struct xfs_agi                  *agi,
        struct xrep_find_ag_btree       *fab)
{
        agi->agi_root = cpu_to_be32(fab[XREP_AGI_INOBT].root);
        agi->agi_level = cpu_to_be32(fab[XREP_AGI_INOBT].height);

        if (xfs_has_finobt(sc->mp)) {
                agi->agi_free_root = cpu_to_be32(fab[XREP_AGI_FINOBT].root);
                agi->agi_free_level = cpu_to_be32(fab[XREP_AGI_FINOBT].height);
        }
}

/* Update the AGI counters. */
STATIC int
xrep_agi_calc_from_btrees(
        struct xfs_scrub        *sc,
        struct xfs_buf          *agi_bp)
{
        struct xfs_btree_cur    *cur;
        struct xfs_agi          *agi = agi_bp->b_addr;
        struct xfs_mount        *mp = sc->mp;
        xfs_agino_t             count;
        xfs_agino_t             freecount;
        int                     error;

        cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, agi_bp, XFS_BTNUM_INO);
        error = xfs_ialloc_count_inodes(cur, &count, &freecount);
        if (error)
                goto err;
        if (xfs_has_inobtcounts(mp)) {
                xfs_agblock_t   blocks;

                error = xfs_btree_count_blocks(cur, &blocks);
                if (error)
                        goto err;
                agi->agi_iblocks = cpu_to_be32(blocks);
        }
        xfs_btree_del_cursor(cur, error);

        agi->agi_count = cpu_to_be32(count);
        agi->agi_freecount = cpu_to_be32(freecount);

        if (xfs_has_finobt(mp) && xfs_has_inobtcounts(mp)) {
                xfs_agblock_t   blocks;

                cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, agi_bp,
                                XFS_BTNUM_FINO);
                error = xfs_btree_count_blocks(cur, &blocks);
                if (error)
                        goto err;
                xfs_btree_del_cursor(cur, error);
                agi->agi_fblocks = cpu_to_be32(blocks);
        }

        return 0;
err:
        xfs_btree_del_cursor(cur, error);
        return error;
}
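
/*
 * A worked example of the rebuild above, assuming an AG whose inobt describes
 * ten inode chunks of 64 inodes with 21 of them free: agi_count becomes 640
 * and agi_freecount becomes 21, and if the inobtcounts feature is enabled the
 * inobt and finobt block counts land in agi_iblocks and agi_fblocks.
 */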

/* Trigger reinitialization of the in-core data. */
STATIC int
xrep_agi_commit_new(
        struct xfs_scrub        *sc,
        struct xfs_buf          *agi_bp)
{
        struct xfs_perag        *pag;
        struct xfs_agi          *agi = agi_bp->b_addr;

        /* Trigger inode count recalculation */
        xfs_force_summary_recalc(sc->mp);

        /* Write this to disk. */
        xfs_trans_buf_set_type(sc->tp, agi_bp, XFS_BLFT_AGI_BUF);
        xfs_trans_log_buf(sc->tp, agi_bp, 0, BBTOB(agi_bp->b_length) - 1);

        /* Now reinitialize the in-core counters if necessary. */
        pag = sc->sa.pag;
        pag->pagi_count = be32_to_cpu(agi->agi_count);
        pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
        set_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);

        return 0;
}

/* Repair the AGI. */
int
xrep_agi(
        struct xfs_scrub                *sc)
{
        struct xrep_find_ag_btree       fab[XREP_AGI_MAX] = {
                [XREP_AGI_INOBT] = {
                        .rmap_owner = XFS_RMAP_OWN_INOBT,
                        .buf_ops = &xfs_inobt_buf_ops,
                        .maxlevels = M_IGEO(sc->mp)->inobt_maxlevels,
                },
                [XREP_AGI_FINOBT] = {
                        .rmap_owner = XFS_RMAP_OWN_INOBT,
                        .buf_ops = &xfs_finobt_buf_ops,
                        .maxlevels = M_IGEO(sc->mp)->inobt_maxlevels,
                },
                [XREP_AGI_END] = {
                        .buf_ops = NULL
                },
        };
        struct xfs_agi                  old_agi;
        struct xfs_mount                *mp = sc->mp;
        struct xfs_buf                  *agi_bp;
        struct xfs_agi                  *agi;
        int                             error;

        /* We require the rmapbt to rebuild anything. */
        if (!xfs_has_rmapbt(mp))
                return -EOPNOTSUPP;

        /*
         * Make sure we have the AGI buffer, as scrub might have decided it
         * was corrupt after xfs_ialloc_read_agi failed with -EFSCORRUPTED.
         */
        error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
                        XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
                                                XFS_AGI_DADDR(mp)),
                        XFS_FSS_TO_BB(mp, 1), 0, &agi_bp, NULL);
        if (error)
                return error;
        agi_bp->b_ops = &xfs_agi_buf_ops;
        agi = agi_bp->b_addr;

        /* Find the AGI btree roots. */
        error = xrep_agi_find_btrees(sc, fab);
        if (error)
                return error;

        /* Start rewriting the header and implant the btrees we found. */
        xrep_agi_init_header(sc, agi_bp, &old_agi);
        xrep_agi_set_roots(sc, agi, fab);
        error = xrep_agi_calc_from_btrees(sc, agi_bp);
        if (error)
                goto out_revert;

        /* Reinitialize in-core state. */
        return xrep_agi_commit_new(sc, agi_bp);

out_revert:
        /* Mark the incore AGI state stale and revert the AGI. */
        clear_bit(XFS_AGSTATE_AGI_INIT, &sc->sa.pag->pag_opstate);
        memcpy(agi, &old_agi, sizeof(old_agi));
        return error;
}