// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/* btree scrubbing */

/*
 * Check for btree operation errors.  See the section about handling
 * operational errors in common.c.
 */
static bool
__xchk_btree_process_error(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	int			level,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	if (*error == 0)
		return true;

	switch (*error) {
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		/* fall through */
	default:
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			trace_xchk_ifork_btree_op_error(sc, cur, level,
					*error, ret_ip);
		else
			trace_xchk_btree_op_error(sc, cur, level,
					*error, ret_ip);
		break;
	}
	return false;
}

bool
xchk_btree_process_error(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	int			level,
	int			*error)
{
	return __xchk_btree_process_error(sc, cur, level, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

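/*
 * Check for btree xref operation errors; a failure found while
 * cross-referencing is tagged with XFS_SCRUB_OFLAG_XFAIL rather than
 * OFLAG_CORRUPT.
 */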
bool
xchk_btree_xref_process_error(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	int			level,
	int			*error)
{
	return __xchk_btree_process_error(sc, cur, level, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/* Record btree block corruption. */
static void
__xchk_btree_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	int			level,
	__u32			errflag,
	void			*ret_ip)
{
	sc->sm->sm_flags |= errflag;

	if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
		trace_xchk_ifork_btree_error(sc, cur, level,
				ret_ip);
	else
		trace_xchk_btree_error(sc, cur, level,
				ret_ip);
}

void
xchk_btree_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	int			level)
{
	__xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
			__return_address);
}

void
xchk_btree_xref_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	int			level)
{
	__xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
			__return_address);
}

/*
 * Make sure this record is in order and doesn't stray outside of the parent
 * keys.
 */
STATIC void
xchk_btree_rec(
	struct xchk_btree	*bs)
{
	struct xfs_btree_cur	*cur = bs->cur;
	union xfs_btree_rec	*rec;
	union xfs_btree_key	key;
	union xfs_btree_key	hkey;
	union xfs_btree_key	*keyp;
	struct xfs_btree_block	*block;
	struct xfs_btree_block	*keyblock;
	struct xfs_buf		*bp;

	block = xfs_btree_get_block(cur, 0, &bp);
	rec = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);

	trace_xchk_btree_rec(bs->sc, cur, 0);

	/* If this isn't the first record, are they in order? */
	if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
		xchk_btree_set_corrupt(bs->sc, cur, 0);
	bs->firstrec = false;
	memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);

	if (cur->bc_nlevels == 1)
		return;

	/* Is this at least as large as the parent low key? */
	cur->bc_ops->init_key_from_rec(&key, rec);
	keyblock = xfs_btree_get_block(cur, 1, &bp);
	keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0)
		xchk_btree_set_corrupt(bs->sc, cur, 1);

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return;

	/* Is this no larger than the parent high key? */
	cur->bc_ops->init_high_key_from_rec(&hkey, rec);
	keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0)
		xchk_btree_set_corrupt(bs->sc, cur, 1);
}

/*
 * Make sure this key is in order and doesn't stray outside of the parent
 * keys.
 */
STATIC void
xchk_btree_key(
	struct xchk_btree	*bs,
	int			level)
{
	struct xfs_btree_cur	*cur = bs->cur;
	union xfs_btree_key	*key;
	union xfs_btree_key	*keyp;
	struct xfs_btree_block	*block;
	struct xfs_btree_block	*keyblock;
	struct xfs_buf		*bp;

	block = xfs_btree_get_block(cur, level, &bp);
	key = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);

	trace_xchk_btree_key(bs->sc, cur, level);

	/* If this isn't the first key, are they in order? */
	if (!bs->firstkey[level] &&
	    !cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key))
		xchk_btree_set_corrupt(bs->sc, cur, level);
	bs->firstkey[level] = false;
	memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len);

	if (level + 1 >= cur->bc_nlevels)
		return;

	/* Is this at least as large as the parent low key? */
	keyblock = xfs_btree_get_block(cur, level + 1, &bp);
	keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0)
		xchk_btree_set_corrupt(bs->sc, cur, level);

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return;

	/* Is this no larger than the parent high key? */
	key = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
	keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0)
		xchk_btree_set_corrupt(bs->sc, cur, level);
}

/*
 * Check a btree pointer.  Returns true if it's ok to use this pointer.
 * Callers do not need to set the corrupt flag.
 */
static bool
xchk_btree_ptr_ok(
	struct xchk_btree	*bs,
	int			level,
	union xfs_btree_ptr	*ptr)
{
	bool			res;

	/* A btree rooted in an inode has no block pointer to the root. */
	if ((bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == bs->cur->bc_nlevels)
		return true;

	/* Otherwise, check the pointers. */
	if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
		res = xfs_btree_check_lptr(bs->cur, be64_to_cpu(ptr->l), level);
	else
		res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level);
	if (!res)
		xchk_btree_set_corrupt(bs->sc, bs->cur, level);

	return res;
}

/* Check that a btree block's sibling matches what we expect. */
STATIC int
xchk_btree_block_check_sibling(
	struct xchk_btree	*bs,
	int			level,
	int			direction,
	union xfs_btree_ptr	*sibling)
{
	struct xfs_btree_cur	*cur = bs->cur;
	struct xfs_btree_block	*pblock;
	struct xfs_buf		*pbp;
	struct xfs_btree_cur	*ncur = NULL;
	union xfs_btree_ptr	*pp;
	int			success;
	int			error;

	error = xfs_btree_dup_cursor(cur, &ncur);
	if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error) ||
	    !ncur)
		return error;

	/*
	 * If the pointer is null, we shouldn't be able to move the upper
	 * level pointer anywhere.
	 */
	if (xfs_btree_ptr_is_null(cur, sibling)) {
		if (direction > 0)
			error = xfs_btree_increment(ncur, level + 1, &success);
		else
			error = xfs_btree_decrement(ncur, level + 1, &success);
		if (error == 0 && success)
			xchk_btree_set_corrupt(bs->sc, cur, level);
		error = 0;
		goto out;
	}

	/* Increment upper level pointer. */
	if (direction > 0)
		error = xfs_btree_increment(ncur, level + 1, &success);
	else
		error = xfs_btree_decrement(ncur, level + 1, &success);
	if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error))
		goto out;
	if (!success) {
		xchk_btree_set_corrupt(bs->sc, cur, level + 1);
		goto out;
	}

	/* Compare upper level pointer to sibling pointer. */
	pblock = xfs_btree_get_block(ncur, level + 1, &pbp);
	pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock);
	if (!xchk_btree_ptr_ok(bs, level + 1, pp))
		goto out;
	if (pbp)
		xchk_buffer_recheck(bs->sc, pbp);

	if (xfs_btree_diff_two_ptrs(cur, pp, sibling))
		xchk_btree_set_corrupt(bs->sc, cur, level);
out:
	xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR);
	return error;
}

/* Check the siblings of a btree block. */
STATIC int
xchk_btree_block_check_siblings(
	struct xchk_btree	*bs,
	struct xfs_btree_block	*block)
{
	struct xfs_btree_cur	*cur = bs->cur;
	union xfs_btree_ptr	leftsib;
	union xfs_btree_ptr	rightsib;
	int			level;
	int			error = 0;

	xfs_btree_get_sibling(cur, block, &leftsib, XFS_BB_LEFTSIB);
	xfs_btree_get_sibling(cur, block, &rightsib, XFS_BB_RIGHTSIB);
	level = xfs_btree_get_level(block);

	/* Root block should never have siblings. */
	if (level == cur->bc_nlevels - 1) {
		if (!xfs_btree_ptr_is_null(cur, &leftsib) ||
		    !xfs_btree_ptr_is_null(cur, &rightsib))
			xchk_btree_set_corrupt(bs->sc, cur, level);
		goto out;
	}

	/*
	 * Do the left & right sibling pointers match the adjacent
	 * parent level pointers?
	 * (These functions absorb error codes for us.)
	 */
	error = xchk_btree_block_check_sibling(bs, level, -1, &leftsib);
	if (error)
		return error;
	error = xchk_btree_block_check_sibling(bs, level, 1, &rightsib);
	if (error)
		return error;
out:
	return error;
}

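/*
 * Each check_owner entry records a btree block whose owner checks had to be
 * deferred because the bnobt/rmapbt cursors were in use; the deferred checks
 * are run at the end of xchk_btree().
 */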
struct check_owner {
	struct list_head	list;
	xfs_daddr_t		daddr;
	int			level;
};

/*
 * Make sure this btree block isn't in the free list and that there's
 * an rmap record for it.
 */
STATIC int
xchk_btree_check_block_owner(
	struct xchk_btree	*bs,
	int			level,
	xfs_daddr_t		daddr)
{
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_btnum_t		btnum;
	bool			init_sa;
	int			error = 0;

	if (!bs->cur)
		return 0;

	btnum = bs->cur->bc_btnum;
	agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr);
	agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr);

	/*
	 * Long-pointer (inode-rooted) btrees are not per-AG, so sc->sa has
	 * not been set up for the AG that owns this block; attach the AG
	 * headers here and release them below.
	 */
	init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
	if (init_sa) {
		error = xchk_ag_init(bs->sc, agno, &bs->sc->sa);
		if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
				level, &error))
			return error;
	}

	xchk_xref_is_used_space(bs->sc, agbno, 1);
	/*
	 * The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we
	 * have to nullify it (to shut down further block owner checks) if
	 * self-xref encounters problems.
	 */
	if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO)
		bs->cur = NULL;

	xchk_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo);
	if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP)
		bs->cur = NULL;

	if (init_sa)
		xchk_ag_free(bs->sc, &bs->sc->sa);

	return error;
}

/* Check the owner of a btree block. */
STATIC int
xchk_btree_check_owner(
	struct xchk_btree	*bs,
	int			level,
	struct xfs_buf		*bp)
{
	struct xfs_btree_cur	*cur = bs->cur;
	struct check_owner	*co;

	/*
	 * In theory, xfs_btree_get_block should only give us a null buffer
	 * pointer for the root of a root-in-inode btree type, but we need
	 * to check defensively here in case the cursor state is also screwed
	 * up.
	 */
	if (bp == NULL) {
		if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
			xchk_btree_set_corrupt(bs->sc, bs->cur, level);
		return 0;
	}

	/*
	 * We want to cross-reference each btree block with the bnobt
	 * and the rmapbt.  We cannot cross-reference the bnobt or
	 * rmapbt while scanning the bnobt or rmapbt, respectively,
	 * because we cannot alter the cursor and we'd prefer not to
	 * duplicate cursors.  Therefore, save the buffer daddr for
	 * later scanning.
	 */
	if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) {
		co = kmem_alloc(sizeof(struct check_owner),
				KM_MAYFAIL);
		if (!co)
			return -ENOMEM;
		co->level = level;
		co->daddr = XFS_BUF_ADDR(bp);
		list_add_tail(&co->list, &bs->to_check);
		return 0;
	}

	return xchk_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp));
}

/*
 * Check that this btree block has at least minrecs records or is one of the
 * special blocks that don't require that.
 */
STATIC void
xchk_btree_check_minrecs(
	struct xchk_btree	*bs,
	int			level,
	struct xfs_btree_block	*block)
{
	struct xfs_btree_cur	*cur = bs->cur;
	unsigned int		root_level = cur->bc_nlevels - 1;
	unsigned int		numrecs = be16_to_cpu(block->bb_numrecs);

	/* More records than minrecs means the block is ok. */
	if (numrecs >= cur->bc_ops->get_minrecs(cur, level))
		return;

	/*
	 * For btrees rooted in the inode, it's possible that the root block
	 * contents spilled into a regular ondisk block because there wasn't
	 * enough space in the inode root.  The number of records in that
	 * child block might be less than the standard minrecs, but that's ok
	 * provided that there's only one direct child of the root.
	 */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 2) {
		struct xfs_btree_block	*root_block;
		struct xfs_buf		*root_bp;
		int			root_maxrecs;

		root_block = xfs_btree_get_block(cur, root_level, &root_bp);
		root_maxrecs = cur->bc_ops->get_dmaxrecs(cur, root_level);
		if (be16_to_cpu(root_block->bb_numrecs) != 1 ||
		    numrecs <= root_maxrecs)
			xchk_btree_set_corrupt(bs->sc, cur, level);
		return;
	}

	/*
	 * Otherwise, only the root level is allowed to have fewer than
	 * minrecs records or keyptrs.
	 */
	if (level < root_level)
		xchk_btree_set_corrupt(bs->sc, cur, level);
}

/*
 * Grab and scrub a btree block given a btree pointer.  Returns block
 * and buffer pointers (if applicable) if they're ok to use.
 */
STATIC int
xchk_btree_get_block(
	struct xchk_btree	*bs,
	int			level,
	union xfs_btree_ptr	*pp,
	struct xfs_btree_block	**pblock,
	struct xfs_buf		**pbp)
{
	xfs_failaddr_t		failed_at;
	int			error;

	*pblock = NULL;
	*pbp = NULL;

	error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock);
	if (!xchk_btree_process_error(bs->sc, bs->cur, level, &error) ||
	    !*pblock)
		return error;

	xfs_btree_get_block(bs->cur, level, pbp);
	if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
		failed_at = __xfs_btree_check_lblock(bs->cur, *pblock,
				level, *pbp);
	else
		failed_at = __xfs_btree_check_sblock(bs->cur, *pblock,
				level, *pbp);
	if (failed_at) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, level);
		return 0;
	}
	if (*pbp)
		xchk_buffer_recheck(bs->sc, *pbp);

	xchk_btree_check_minrecs(bs, level, *pblock);

	/*
	 * Check the block's owner; this function absorbs error codes
	 * for us.
	 */
	error = xchk_btree_check_owner(bs, level, *pbp);
	if (error)
		return error;

	/*
	 * Check the block's siblings; this function absorbs error codes
	 * for us.
	 */
	return xchk_btree_block_check_siblings(bs, *pblock);
}

/*
 * Check that the low and high keys of this block match the keys stored
 * in the parent block.
 */
STATIC void
xchk_btree_block_keys(
	struct xchk_btree	*bs,
	int			level,
	struct xfs_btree_block	*block)
{
	union xfs_btree_key	block_keys;
	struct xfs_btree_cur	*cur = bs->cur;
	union xfs_btree_key	*high_bk;
	union xfs_btree_key	*parent_keys;
	union xfs_btree_key	*high_pk;
	struct xfs_btree_block	*parent_block;
	struct xfs_buf		*bp;

	if (level >= cur->bc_nlevels - 1)
		return;

	/* Calculate the keys for this block. */
	xfs_btree_get_keys(cur, block, &block_keys);

	/* Obtain the parent's copy of the keys for this block. */
	parent_block = xfs_btree_get_block(cur, level + 1, &bp);
	parent_keys = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1],
			parent_block);

	if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0)
		xchk_btree_set_corrupt(bs->sc, cur, 1);

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return;

	/* Get high keys */
	high_bk = xfs_btree_high_key_from_key(cur, &block_keys);
	high_pk = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1],
			parent_block);

	if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0)
		xchk_btree_set_corrupt(bs->sc, cur, 1);
}

/*
 * Visit all nodes and leaves of a btree.  Check that all pointers and
 * records are in order, that the keys reflect the records, and use a callback
 * so that the caller can verify individual records.
 */
int
xchk_btree(
	struct xfs_scrub		*sc,
	struct xfs_btree_cur		*cur,
	xchk_btree_rec_fn		scrub_fn,
	const struct xfs_owner_info	*oinfo,
	void				*private)
{
	struct xchk_btree		bs = {
		.cur			= cur,
		.scrub_rec		= scrub_fn,
		.oinfo			= oinfo,
		.firstrec		= true,
		.private		= private,
		.sc			= sc,
	};
	union xfs_btree_ptr		ptr;
	union xfs_btree_ptr		*pp;
	union xfs_btree_rec		*recp;
	struct xfs_btree_block		*block;
	int				level;
	struct xfs_buf			*bp;
	struct check_owner		*co;
	struct check_owner		*n;
	int				i;
	int				error = 0;

	/* Initialize scrub state */
	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++)
		bs.firstkey[i] = true;
	INIT_LIST_HEAD(&bs.to_check);

	/* Don't try to check a tree with a height we can't handle. */
	if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) {
		xchk_btree_set_corrupt(sc, cur, 0);
		goto out;
	}

	/*
	 * Load the root of the btree.  The helper function absorbs
	 * error codes for us.
	 */
	level = cur->bc_nlevels - 1;
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	if (!xchk_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr))
		goto out;
	error = xchk_btree_get_block(&bs, level, &ptr, &block, &bp);
	if (error || !block)
		goto out;

	cur->bc_ptrs[level] = 1;

	/*
	 * Iterative depth-first walk: cur->bc_ptrs[] tracks the next entry
	 * to visit at each level.  Descend through keyptrs until we reach a
	 * leaf, check each record, then climb back towards the root once a
	 * block has been exhausted.
	 */
	while (level < cur->bc_nlevels) {
		block = xfs_btree_get_block(cur, level, &bp);

		if (level == 0) {
			/* End of leaf, pop back towards the root. */
			if (cur->bc_ptrs[level] >
			    be16_to_cpu(block->bb_numrecs)) {
				xchk_btree_block_keys(&bs, level, block);
				if (level < cur->bc_nlevels - 1)
					cur->bc_ptrs[level + 1]++;
				level++;
				continue;
			}

			/* Records in order for scrub? */
			xchk_btree_rec(&bs);

			/* Call out to the record checker. */
			recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
			error = bs.scrub_rec(&bs, recp);
			if (error)
				break;
			if (xchk_should_terminate(sc, &error) ||
			    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
				break;

			cur->bc_ptrs[level]++;
			continue;
		}

		/* End of node, pop back towards the root. */
		if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
			xchk_btree_block_keys(&bs, level, block);
			if (level < cur->bc_nlevels - 1)
				cur->bc_ptrs[level + 1]++;
			level++;
			continue;
		}

		/* Keys in order for scrub? */
		xchk_btree_key(&bs, level);

		/* Drill another level deeper. */
		pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
		if (!xchk_btree_ptr_ok(&bs, level, pp)) {
			cur->bc_ptrs[level]++;
			continue;
		}
		level--;
		error = xchk_btree_get_block(&bs, level, pp, &block, &bp);
		if (error || !block)
			goto out;

		cur->bc_ptrs[level] = 1;
	}

out:
	/* Process deferred owner checks on btree blocks. */
	list_for_each_entry_safe(co, n, &bs.to_check, list) {
		if (!error && bs.cur)
			error = xchk_btree_check_block_owner(&bs,
					co->level, co->daddr);
		list_del(&co->list);
		kmem_free(co);
	}

	return error;
}