// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xchk_superblock_xref(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xchk_ag_init(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = bp->b_addr;

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
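	/*
	 * Note that the comparisons below are done in big-endian form:
	 * each on-disk field is compared against cpu_to_be*() of the
	 * incore value (or a big-endian mask), which saves byte-swapping
	 * every on-disk field just to compare it.
	 */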
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
	 */
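	/*
	 * (With lazy superblock counters the authoritative values live
	 * in memory; the on-disk copies, particularly in secondary
	 * superblocks, are allowed to be stale.)
	 */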

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */
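		/*
		 * (The buffer read verifier already checked the CRC when
		 * this superblock was pulled in from disk.)
		 */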

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);

	return error;
}

/* AGF */

/* Tally freespace record lengths. */
STATIC int
xchk_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Check agf_freeblks */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_extlen_t		blocks = 0;
	int			error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		agbno;
	xfs_extlen_t		blocks;
	int			have;
	int			error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
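
/*
 * Note that agf_btreeblks does not count each btree's (always present)
 * root block, hence the "blocks - 1" for every tree tallied below.
 */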
/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/* agf_btreeblks didn't exist before lazysbcount */
	if (!xfs_sb_version_haslazysbcount(&sc->mp->m_sb))
		return;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check agf_refcount_blocks against tree size */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agf_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = sc->sa.agf_bp->b_addr;

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}
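
	/*
	 * The AGFL is a circular buffer: agf_flfirst and agf_fllast are
	 * indices into the AGFL block, and the active region may wrap
	 * past the end of the array, hence the two-case count below.
	 */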
	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Do the incore counters match? */
	pag = xfs_perag_get(mp, agno);
	if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (xfs_sb_version_haslazysbcount(&sc->mp->m_sb) &&
	    pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	xfs_perag_put(pag);

	xchk_agf_xref(sc);
out:
	return error;
}

/* AGFL */
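
/*
 * Walk context for the AGFL scan: xchk_agfl_block() collects each AGFL
 * block number into entries[] (sized from agf_flcount) so that the
 * caller can sort the list and look for duplicate entries afterwards.
 */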
struct xchk_agfl_info {
	unsigned int		sz_entries;
	unsigned int		nr_entries;
	xfs_agblock_t		*entries;
	struct xfs_scrub	*sc;
};

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xchk_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xchk_agfl_info	*sai = priv;
	struct xfs_scrub	*sc = sai->sc;
	xfs_agnumber_t		agno = sc->sa.agno;

	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xchk_block_set_corrupt(sc, sc->sa.agfl_bp);

	xchk_agfl_block_xref(sc, agbno);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}

static int
xchk_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}

/* Scrub the AGFL. */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai;
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	unsigned int		agflcount;
	unsigned int		i;
	int			error;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xchk_buffer_recheck(sc, sc->sa.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = sc->sa.agf_bp->b_addr;
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sc = sc;
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
			KM_MAYFAIL);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr,
			sc->sa.agfl_bp, xchk_agfl_block, &sai);
	if (error == -ECANCELED) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}
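
	/*
	 * Sorting puts any duplicate block numbers next to each other,
	 * so one pass comparing adjacent entries is enough to find them.
	 */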
	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}

/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}

/* Check agi_[fi]blocks against tree size */
static inline void
xchk_agi_xref_fiblocks(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error = 0;

	if (!xfs_sb_version_hasinobtcounts(&sc->mp->m_sb))
		return;

	if (sc->sa.ino_cur) {
		error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_iblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_fblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agi_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_agi_xref_icounts(sc);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agi_xref_fiblocks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = sc->sa.agi_bp->b_addr;

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
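
	/*
	 * agi_unlinked[] is a hash table of singly linked lists of
	 * unlinked-but-still-referenced inodes; each bucket head must
	 * be NULLAGINO or a valid inode number in this AG.
	 */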
	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (!xfs_verify_agino_or_null(mp, agno, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Do the incore counters match? */
	pag = xfs_perag_get(mp, agno);
	if (pag->pagi_count != be32_to_cpu(agi->agi_count))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	xfs_perag_put(pag);

	xchk_agi_xref(sc);
out:
	return error;
}