// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xchk_setup_ag_iallocbt(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	return xchk_setup_ag_btree(sc, ip, sc->flags & XCHK_TRY_HARDER);
}

/* Inode btree scrubber. */

struct xchk_iallocbt {
	/* Number of inodes we see while scanning inobt. */
	unsigned long long	inodes;

	/* Expected next startino, for big block filesystems. */
	xfs_agino_t		next_startino;

	/* Expected end of the current inode cluster. */
	xfs_agino_t		next_cluster_ino;
};

/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is an finobt, make sure
 * we have a record or not depending on freecount.
 */
static inline void
xchk_iallocbt_chunk_xref_other(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino)
{
	struct xfs_btree_cur		**pcur;
	bool				has_irec;
	int				error;

	if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
		pcur = &sc->sa.ino_cur;
	else
		pcur = &sc->sa.fino_cur;
	if (!(*pcur))
		return;
	error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
	if (!xchk_should_check_xref(sc, &error, pcur))
		return;
	if ((irec->ir_freecount > 0 && !has_irec) ||
	    (irec->ir_freecount == 0 && has_irec))
		xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_iallocbt_chunk_xref(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_iallocbt_chunk_xref_other(sc, irec, agino);
	xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, len);
}

/* Is this chunk worth checking? */
STATIC bool
xchk_iallocbt_chunk(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_extlen_t			len)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			bno;

	bno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

	return true;
}

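/*
 * Illustrative note (not part of the original source): an inobt record
 * always covers XFS_INODES_PER_CHUNK (64) inodes, so on a hypothetical
 * filesystem with 512-byte inodes and 4096-byte blocks a full chunk spans
 * 64 * 512 / 4096 = 8 fs blocks.  The overflow and range checks above
 * catch records whose start or end block would fall outside the AG.
 */
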
/* Count the number of free inodes. */
static unsigned int
xchk_iallocbt_freecount(
	xfs_inofree_t			freemask)
{
	BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
	return hweight64(freemask);
}

/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded we examine the on-disk inode directly.
 *
 * Since there can be 1:M and M:1 mappings between inobt records and inode
 * clusters, we pass in the inode location information as an inobt record;
 * the index of an inode cluster within the inobt record (as well as the
 * cluster buffer itself); and the index of the inode within the cluster.
 *
 * @irec is the inobt record.
 * @irec_ino is the inode offset from the start of the record.
 * @dip is the on-disk inode.
 */
STATIC int
xchk_iallocbt_check_cluster_ifree(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			irec_ino,
	struct xfs_dinode		*dip)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_ino_t			fsino;
	xfs_agino_t			agino;
	bool				irec_free;
	bool				ino_inuse;
	bool				freemask_ok;
	int				error = 0;

	if (xchk_should_terminate(bs->sc, &error))
		return error;

	/*
	 * Given an inobt record and the offset of an inode from the start of
	 * the record, compute which fs inode we're talking about.
	 */
	agino = irec->ir_startino + irec_ino;
	fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
	irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
	    (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino,
			&ino_inuse);
	if (error == -ENODATA) {
		/* Not cached, just read the disk buffer */
		freemask_ok = irec_free ^ !!(dip->di_mode);
		if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
			return -EDEADLOCK;
	} else if (error < 0) {
		/*
		 * Inode is only half assembled, or there was an IO error,
		 * or the verifier failed, so don't bother trying to check.
		 * The inode scrubber can deal with this.
		 */
		goto out;
	} else {
		/* Inode is all there. */
		freemask_ok = irec_free ^ ino_inuse;
	}
	if (!freemask_ok)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
	return 0;
}

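/*
 * Illustrative note (not part of the original source): freemask_ok above is
 * an XOR of "the record says this inode is free" and "the inode is actually
 * in use".  For example, a clear ir_free bit on an inode with a nonzero
 * di_mode XORs false with true, so the record is consistent; an ir_free bit
 * set on an in-use inode (or clear on a free one) XORs to false and the
 * record is flagged as corrupt.
 */
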
/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's actually on disk.  If sparse inodes are enabled, the cluster does
 * not actually have to map to inodes if the corresponding holemask bit is set.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */
STATIC int
xchk_iallocbt_check_cluster(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			cluster_base)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_dinode		*dip;
	struct xfs_buf			*cluster_bp;
	unsigned int			nr_inodes;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			agbno;
	unsigned int			cluster_index;
	uint16_t			cluster_mask = 0;
	uint16_t			ir_holemask;
	int				error = 0;

	nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
			mp->m_inodes_per_cluster);

	/* Map this inode cluster */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

	/* Compute a bitmask for this cluster that can be used for holemask. */
	for (cluster_index = 0;
	     cluster_index < nr_inodes;
	     cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
		cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
				XFS_INODES_PER_HOLEMASK_BIT);

	/*
	 * Map the first inode of this cluster to a buffer and offset.
	 * Be careful about inobt records that don't align with the start of
	 * the inode buffer when block sizes are large enough to hold multiple
	 * inode chunks.  When this happens, cluster_base will be zero but
	 * ir_startino can be large enough to make im_boffset nonzero.
	 */
	ir_holemask = (irec->ir_holemask & cluster_mask);
	imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
	imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
	imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
			mp->m_sb.sb_inodelog;

	if (imap.im_boffset != 0 && cluster_base != 0) {
		ASSERT(imap.im_boffset == 0 || cluster_base == 0);
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
			imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
			cluster_mask, ir_holemask,
			XFS_INO_TO_OFFSET(mp, irec->ir_startino +
					  cluster_base));

	/* The whole cluster must be a hole or not a hole. */
	if (ir_holemask != cluster_mask && ir_holemask != 0) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	/* If any part of this is a hole, skip it. */
	if (ir_holemask) {
		xchk_xref_is_not_owned_by(bs->sc, agbno,
				mp->m_blocks_per_cluster,
				&XFS_RMAP_OINFO_INODES);
		return 0;
	}

	xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
			&XFS_RMAP_OINFO_INODES);

	/* Grab the inode cluster buffer. */
	error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp,
			0, 0);
	if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
		return error;

	/* Check free status of each inode within this cluster. */
	for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
		struct xfs_dinode	*dip;

		if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			break;
		}

		dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
		error = xchk_iallocbt_check_cluster_ifree(bs, irec,
				cluster_base + cluster_index, dip);
		if (error)
			break;
		imap.im_boffset += mp->m_sb.sb_inodesize;
	}

	xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
	return error;
}

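/*
 * Illustrative note (not part of the original source): each holemask bit
 * covers XFS_INODES_PER_HOLEMASK_BIT (4) inodes.  On a hypothetical
 * filesystem with 16 inodes per cluster, the cluster starting at
 * cluster_base 16 maps to holemask bits 4-7, so cluster_mask above works
 * out to 0x00f0; the record's holemask must then cover either all or none
 * of those bits for the cluster to be considered consistent.
 */
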
/*
 * For all the inode clusters that could map to this inobt record, make sure
 * that the holemask makes sense and that the allocation status of each inode
 * matches the freemask.
 */
STATIC int
xchk_iallocbt_check_clusters(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	unsigned int			cluster_base;
	int				error = 0;

	/*
	 * For the common case where this inobt record maps to multiple inode
	 * clusters this will call _check_cluster for each cluster.
	 *
	 * For the case that multiple inobt records map to a single cluster,
	 * this will call _check_cluster once.
	 */
	for (cluster_base = 0;
	     cluster_base < XFS_INODES_PER_CHUNK;
	     cluster_base += bs->sc->mp->m_inodes_per_cluster) {
		error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
		if (error)
			break;
	}

	return error;
}

/*
 * Make sure this inode btree record is aligned properly.  Because a fs block
 * contains multiple inodes, we check that the inobt record is aligned to the
 * correct inode, not just the correct block on disk.  This results in a finer
 * grained corruption check.
 */
STATIC void
xchk_iallocbt_rec_alignment(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_mount		*mp = bs->sc->mp;
	struct xchk_iallocbt		*iabt = bs->private;

	/*
	 * finobt records have different positioning requirements than inobt
	 * records: each finobt record must have a corresponding inobt record.
	 * That is checked in the xref function, so for now we only catch the
	 * obvious case where the record isn't at all aligned properly.
	 *
	 * Note that if a fs block contains more than a single chunk of inodes,
	 * we will have finobt records only for those chunks containing free
	 * inodes, and therefore expect chunk alignment of finobt records.
	 * Otherwise, we expect that the finobt record is aligned to the
	 * cluster alignment as told by the superblock.
	 */
	if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
		unsigned int	imask;

		imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
				mp->m_cluster_align_inodes) - 1;
		if (irec->ir_startino & imask)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (iabt->next_startino != NULLAGINO) {
		/*
		 * We're midway through a cluster of inodes that is mapped by
		 * multiple inobt records.  Did we get the record for the next
		 * irec in the sequence?
		 */
		if (irec->ir_startino != iabt->next_startino) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			return;
		}

		iabt->next_startino += XFS_INODES_PER_CHUNK;

		/* Are we done with the cluster? */
		if (iabt->next_startino >= iabt->next_cluster_ino) {
			iabt->next_startino = NULLAGINO;
			iabt->next_cluster_ino = NULLAGINO;
		}
		return;
	}

	/* inobt records must be aligned to cluster and inode alignment size. */
	if (irec->ir_startino & (mp->m_cluster_align_inodes - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (irec->ir_startino & (mp->m_inodes_per_cluster - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (mp->m_inodes_per_cluster <= XFS_INODES_PER_CHUNK)
		return;

	/*
	 * If this is the start of an inode cluster that can be mapped by
	 * multiple inobt records, the next inobt record must follow exactly
	 * after this one.
	 */
	iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
	iabt->next_cluster_ino = irec->ir_startino + mp->m_inodes_per_cluster;
}

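/*
 * Illustrative note (not part of the original source): suppose a
 * hypothetical filesystem packs 128 inodes into one cluster while chunks
 * remain 64 inodes.  The inobt record starting at agino 0 primes
 * next_startino = 64 and next_cluster_ino = 128, so the very next record
 * must begin at agino 64; once that record is seen, both fields reset to
 * NULLAGINO and the normal alignment checks resume.
 */
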
/* Scrub an inobt/finobt record. */
STATIC int
xchk_iallocbt_rec(
	struct xchk_btree		*bs,
	union xfs_btree_rec		*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_inobt_rec_incore	irec;
	uint64_t			holes;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agino_t			agino;
	xfs_extlen_t			len;
	int				holecount;
	int				i;
	int				error = 0;
	unsigned int			real_freecount;
	uint16_t			holemask;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);

	if (irec.ir_count > XFS_INODES_PER_CHUNK ||
	    irec.ir_freecount > XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	real_freecount = irec.ir_freecount +
			(XFS_INODES_PER_CHUNK - irec.ir_count);
	if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	agino = irec.ir_startino;
	/* Record has to be properly aligned within the AG. */
	if (!xfs_verify_agino(mp, agno, agino) ||
	    !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	xchk_iallocbt_rec_alignment(bs, &irec);
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	iabt->inodes += irec.ir_count;

	/* Handle non-sparse inodes */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		len = XFS_B_TO_FSB(mp,
				XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
		if (irec.ir_count != XFS_INODES_PER_CHUNK)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

		if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			goto out;
		goto check_clusters;
	}

	/* Check each chunk of a sparse inode cluster. */
	holemask = irec.ir_holemask;
	holecount = 0;
	len = XFS_B_TO_FSB(mp,
			XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
	holes = ~xfs_inobt_irec_to_allocmask(&irec);
	if ((holes & irec.ir_free) != holes ||
	    irec.ir_freecount > irec.ir_count)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
		if (holemask & 1)
			holecount += XFS_INODES_PER_HOLEMASK_BIT;
		else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			break;
		holemask >>= 1;
		agino += XFS_INODES_PER_HOLEMASK_BIT;
	}

	if (holecount > XFS_INODES_PER_CHUNK ||
	    holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

check_clusters:
	error = xchk_iallocbt_check_clusters(bs, &irec);
	if (error)
		goto out;

out:
	return error;
}

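/*
 * Illustrative note (not part of the original source): for a sparse record
 * with ir_holemask 0xff00, the upper 32 inodes of the chunk are holes, so
 * ir_count should be 32 and holecount in the loop above also works out to
 * 32.  Every hole must also be marked free (holes & ir_free must equal
 * holes), since an inode that was never physically allocated cannot be in
 * use.
 */
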
/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
	struct xfs_scrub	*sc,
	int			which)
{
	xfs_filblks_t		blocks;
	xfs_extlen_t		inobt_blocks = 0;
	xfs_extlen_t		finobt_blocks = 0;
	int			error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) ||
	    xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inobt blocks as the rmap says. */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xchk_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xchk_process_error(sc, 0, 0, &error))
			return;
	}

	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INOBT, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inobt_blocks + finobt_blocks)
		xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}

/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xchk_iallocbt_xref_rmap_inodes(
	struct xfs_scrub	*sc,
	int			which,
	unsigned long long	inodes)
{
	xfs_filblks_t		blocks;
	xfs_filblks_t		inode_blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inode blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INODES, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
	if (blocks != inode_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub the inode btrees for some AG. */
STATIC int
xchk_iallocbt(
	struct xfs_scrub	*sc,
	xfs_btnum_t		which)
{
	struct xfs_btree_cur	*cur;
	struct xchk_iallocbt	iabt = {
		.inodes		= 0,
		.next_startino	= NULLAGINO,
		.next_cluster_ino = NULLAGINO,
	};
	int			error;

	cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
	error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
			&iabt);
	if (error)
		return error;

	xchk_iallocbt_xref_rmap_btreeblks(sc, which);

	/*
	 * If we're scrubbing the inode btree, inode_blocks is the number of
	 * blocks pointed to by all the inode chunk records.  Therefore, we
	 * should compare to the number of inode chunk blocks that the rmap
	 * knows about.  We can't do this for the finobt since it only points
	 * to inode chunks with free inodes.
	 */
	if (which == XFS_BTNUM_INO)
		xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);

	return error;
}

int
xchk_inobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_INO);
}

int
xchk_finobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}

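/*
 * Illustrative note (not part of the original source): if the inobt scan
 * above counted 1024 inodes on a hypothetical filesystem with 512-byte
 * inodes and 4096-byte blocks, inode_blocks works out to
 * 1024 * 512 / 4096 = 128 fs blocks, which must equal the number of blocks
 * the rmapbt records as owned by XFS_RMAP_OINFO_INODES.
 */
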
/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xchk_xref_inode_check(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	struct xfs_btree_cur	**icur,
	bool			should_have_inodes)
{
	bool			has_inodes;
	int			error;

	if (!(*icur) || xchk_skip_xref(sc->sm))
		return;

	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
	if (!xchk_should_check_xref(sc, &error, icur))
		return;
	if (has_inodes != should_have_inodes)
		xchk_btree_xref_set_corrupt(sc, *icur, 0);
}

/* xref check that the extent is not covered by inodes */
void
xchk_xref_is_not_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}

/* xref check that the extent is covered by inodes */
void
xchk_xref_is_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}

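/*
 * Illustrative note (not part of the original source): the two xref helpers
 * above are intended for other per-AG scrubbers; for example, a free space
 * checker would call xchk_xref_is_not_inode_chunk() on a free extent, while
 * a reverse-mapping checker would pick one of the two depending on whether
 * the rmap record claims inode ownership.
 */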