// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xchk_superblock_xref(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xchk_ag_init_existing(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	/*
	 * Grab an active reference to the perag structure.  If we can't get
	 * it, we're racing with something that's tearing down the AG, so
	 * signal that the AG no longer exists.
	 */
	pag = xfs_perag_get(mp, agno);
	if (!pag)
		return -ENOENT;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
		fallthrough;
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		goto out_pag;

	sb = bp->b_addr;

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
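	/*
	 * For example, sb_blocksize is fixed at mkfs time, so a mismatch is
	 * flagged as corruption, whereas sb_fname (the filesystem label) can
	 * be changed on a mounted filesystem and is therefore only preened.
	 */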
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (xfs_sb_is_v5(&mp->m_sb))
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_preen(sc, bp);

	if (!xfs_has_crc(mp)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* compat features must match */
		if (sb->sb_features_compat !=
		    cpu_to_be32(mp->m_sb.sb_features_compat))
			xchk_block_set_corrupt(sc, bp);

		/* ro compat features must match */
		if (sb->sb_features_ro_compat !=
		    cpu_to_be32(mp->m_sb.sb_features_ro_compat))
			xchk_block_set_corrupt(sc, bp);

		/*
		 * NEEDSREPAIR is ignored on a secondary super, so we should
		 * clear it when we find it, though it's not a corruption.
		 */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR);
		if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^
		     sb->sb_features_incompat) & features_mask)
			xchk_block_set_preen(sc, bp);

		/* all other incompat features must match */
		if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^
		     sb->sb_features_incompat) & ~features_mask)
			xchk_block_set_corrupt(sc, bp);

		/*
		 * log incompat features protect newer log record types from
		 * older log recovery code.  Log recovery doesn't check the
		 * secondary supers, so we can clear these if needed.
		 */
		if (sb->sb_features_log_incompat)
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_has_metauuid(mp)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);
out_pag:
	xfs_perag_put(pag);
	return error;
}

/* AGF */

/* Tally freespace record lengths. */
STATIC int
xchk_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	const struct xfs_alloc_rec_incore *rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Check agf_freeblks */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_extlen_t		blocks = 0;
	int			error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		agbno;
	xfs_extlen_t		blocks;
	int			have;
	int			error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check the btree block counts in the AGF against the btrees. */
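/*
 * Note that agf_btreeblks does not count each btree's root block, which is
 * why one block per btree is subtracted below before the totals are summed
 * and compared against the AGF.
 */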
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/* agf_btreeblks didn't exist before lazysbcount */
	if (!xfs_has_lazysbcount(sc->mp))
		return;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_has_rmapbt(mp) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check agf_refcount_blocks against tree size */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agf_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = sc->sa.agf_bp->b_addr;
	pag = sc->sa.pag;

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != pag->block_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(pag, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(pag, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > mp->m_alloc_maxlevels)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > mp->m_alloc_maxlevels)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_has_rmapbt(mp)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(pag, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > mp->m_rmap_maxlevels)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_has_reflink(mp)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(pag, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > mp->m_refc_maxlevels)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Do the incore counters match? */
	if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (xfs_has_lazysbcount(sc->mp) &&
	    pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	xchk_agf_xref(sc);
out:
	return error;
}

/* AGFL */

struct xchk_agfl_info {
	/* Number of AGFL entries that the AGF claims are in use. */
	unsigned int		agflcount;

	/* Number of AGFL entries that we found. */
	unsigned int		nr_entries;

	/* Buffer to hold AGFL entries for extent checking. */
	xfs_agblock_t		*entries;

	struct xfs_buf		*agfl_bp;
	struct xfs_scrub	*sc;
};

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xchk_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xchk_agfl_info	*sai = priv;
	struct xfs_scrub	*sc = sai->sc;

	if (xfs_verify_agbno(sc->sa.pag, agbno) &&
	    sai->nr_entries < sai->agflcount)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xchk_block_set_corrupt(sc, sai->agfl_bp);

	xchk_agfl_block_xref(sc, agbno);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}

/* Compare two AGFL block numbers for sorting. */
static int
xchk_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}

/* Scrub the AGFL. */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai = {
		.sc		= sc,
	};
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	unsigned int		i;
	int			error;

	/* Lock the AGF and AGI so that nobody can touch this AG. */
	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		return error;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;

	/* Try to read the AGFL, and verify its structure if we get it. */
	error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &sai.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		return error;
	xchk_buffer_recheck(sc, sai.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = sc->sa.agf_bp->b_addr;
	sai.agflcount = be32_to_cpu(agf->agf_flcount);
	if (sai.agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	sai.entries = kvcalloc(sai.agflcount, sizeof(xfs_agblock_t),
			       XCHK_GFP_FLAGS);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr, sai.agfl_bp,
			xchk_agfl_block, &sai);
	if (error == -ECANCELED) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	if (sai.agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
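	/*
	 * Once sorted, any duplicate AGFL entries are adjacent, so one linear
	 * pass over consecutive pairs is enough to catch them.
	 */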
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kvfree(sai.entries);
out:
	return error;
}

/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}

/* Check agi_[fi]blocks against tree size */
static inline void
xchk_agi_xref_fiblocks(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error = 0;

	if (!xfs_has_inobtcounts(sc->mp))
		return;

	if (sc->sa.ino_cur) {
		error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_iblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_fblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agi_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_agi_xref_icounts(sc);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agi_xref_fiblocks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	struct xfs_ino_geometry	*igeo = M_IGEO(sc->mp);
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = sc->sa.agi_bp->b_addr;
	pag = sc->sa.pag;

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != pag->block_count)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(pag, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > igeo->inobt_maxlevels)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_has_finobt(mp)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(pag, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > igeo->inobt_maxlevels)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (!xfs_verify_agino_or_null(pag, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (!xfs_verify_agino_or_null(pag, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (!xfs_verify_agino_or_null(pag, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Do the incore counters match? */
	if (pag->pagi_count != be32_to_cpu(agi->agi_count))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	xchk_agi_xref(sc);
out:
	return error;
}