// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xchk_superblock_xref(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xchk_ag_init(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = bp->b_addr;

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
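
	/*
	 * Note: on-disk superblock fields are stored big-endian, so each
	 * comparison below converts the incore value with cpu_to_beXX()
	 * rather than byte-swapping every on-disk field.
	 */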
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
	 */
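
	/*
	 * (With lazy superblock counters in particular, the on-disk copies
	 * are only guaranteed to match reality after a clean unmount, so
	 * comparing them here would mostly produce false positives.)
	 */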

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);

	return error;
}

/* AGF */

/* Tally freespace record lengths. */
STATIC int
xchk_agf_record_bno_lengths(
	struct xfs_btree_cur	*cur,
	struct xfs_alloc_rec_incore *rec,
	void			*priv)
{
	xfs_extlen_t		*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Check agf_freeblks */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_extlen_t		blocks = 0;
	int			error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		agbno;
	xfs_extlen_t		blocks;
	int			have;
	int			error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/*
	 * Check agf_longest.  The cntbt is sorted by extent length, so the
	 * record found by the LE lookup above is the longest free extent.
	 */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
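
/*
 * Note: throughout these xref helpers, xchk_should_check_xref() consumes
 * the error from a failed cross-reference lookup: it flags the scrub
 * result as incomplete (XFAIL), deletes the btree cursor so that later
 * checks skip it, and squashes the error code itself.
 */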

/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/*
	 * Check agf_rmap_blocks; set up for the agf_btreeblks check.  Each
	 * btree always has a root block, which agf_btreeblks does not count,
	 * hence the "blocks - 1" below.
	 */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check agf_refcount_blocks against tree size */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agf_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = sc->sa.agf_bp->b_addr;

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/*
	 * Check the AGFL counters.  The AGFL is a circular buffer, so the
	 * active range can wrap past the end of the list; account for that
	 * when computing the expected flcount.
	 */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Do the incore counters match? */
	pag = xfs_perag_get(mp, agno);
	if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	xfs_perag_put(pag);

	xchk_agf_xref(sc);
out:
	return error;
}

/* AGFL */

struct xchk_agfl_info {
	unsigned int		sz_entries;
	unsigned int		nr_entries;
	xfs_agblock_t		*entries;
	struct xfs_scrub	*sc;
};
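
/*
 * Note: entries[] accumulates every AGFL block number seen during the walk;
 * sz_entries is the allocated capacity and nr_entries the number actually
 * recorded.  xchk_agfl() sorts the array afterwards to detect duplicate
 * AGFL entries.
 */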

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xchk_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xchk_agfl_info	*sai = priv;
	struct xfs_scrub	*sc = sai->sc;
	xfs_agnumber_t		agno = sc->sa.agno;

	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xchk_block_set_corrupt(sc, sc->sa.agfl_bp);

	xchk_agfl_block_xref(sc, agbno);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}

static int
xchk_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}

/* Scrub the AGFL. */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai;
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	unsigned int		agflcount;
	unsigned int		i;
	int			error;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xchk_buffer_recheck(sc, sc->sa.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = sc->sa.agf_bp->b_addr;
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sc = sc;
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
			KM_MAYFAIL);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr,
			sc->sa.agfl_bp, xchk_agfl_block, &sai);
	if (error == -ECANCELED) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}

/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}

/* Check agi_[fi]blocks against tree size */
static inline void
xchk_agi_xref_fiblocks(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error = 0;

	if (!xfs_sb_version_hasinobtcounts(&sc->mp->m_sb))
		return;

	if (sc->sa.ino_cur) {
		error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_iblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_fblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agi_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_agi_xref_icounts(sc);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agi_xref_fiblocks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = sc->sa.agi_bp->b_addr;

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (!xfs_verify_agino_or_null(mp, agno, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Do the incore counters match? */
	pag = xfs_perag_get(mp, agno);
	if (pag->pagi_count != be32_to_cpu(agi->agi_count))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	xfs_perag_put(pag);

	xchk_agi_xref(sc);
out:
	return error;
}