// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Qu Wenruo 2017.  All rights reserved.
 */

/*
 * The module is used to catch unexpected/corrupted tree block data.
 * Such behavior can be caused either by a fuzzed image or bugs.
 *
 * The objective is to do leaf/node validation checks when a tree block is
 * read from disk, and check *every* possible member, so other code won't
 * need to check them again.
 *
 * Due to the potential for unwanted damage, every checker needs to be
 * carefully reviewed, otherwise it may prevent the mount of valid images.
 */

#include "ctree.h"
#include "tree-checker.h"
#include "disk-io.h"
#include "compression.h"
#include "volumes.h"

/*
 * Error messages should follow the following format:
 * corrupt <type>: <identifier>, <reason>[, <bad_value>]
 *
 * @type:	leaf or node
 * @identifier:	the necessary info to locate the leaf/node.
 *		It's recommended to decode key.objectid/offset if it's
 *		meaningful.
 * @reason:	describe the error
 * @bad_value:	optional, it's recommended to output the bad value and its
 *		expected value (range).
 *
 * Since comma is used to separate the components, only spaces are allowed
 * inside each component.
 */
/*
 * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
 * Allows callers to customize the output.
 */
__printf(4, 5)
__cold
static void generic_err(const struct btrfs_fs_info *fs_info,
			const struct extent_buffer *eb, int slot,
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
		   "corrupt %s: root=%llu block=%llu slot=%d, %pV",
		   btrfs_header_level(eb) == 0 ? "leaf" : "node",
		   btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, &vaf);
	va_end(args);
}

/*
 * Customized reporter for extent data item, since its key objectid and
 * offset have their own meaning.
 */
__printf(4, 5)
__cold
static void file_extent_err(const struct btrfs_fs_info *fs_info,
			    const struct extent_buffer *eb, int slot,
			    const char *fmt, ...)
{
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV",
		   btrfs_header_level(eb) == 0 ? "leaf" : "node",
		   btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		   key.objectid, key.offset, &vaf);
	va_end(args);
}

/*
 * Return 0 if the btrfs_file_extent_##name is aligned to @alignment,
 * else return 1.
 */
#define CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, name, alignment)	      \
({									      \
	if (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))) \
		file_extent_err((fs_info), (leaf), (slot),		      \
	"invalid %s for file extent, have %llu, should be aligned to %u",    \
			(#name), btrfs_file_extent_##name((leaf), (fi)),      \
			(alignment));					      \
	(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment)));  \
})

static int check_extent_data_item(struct btrfs_fs_info *fs_info,
				  struct extent_buffer *leaf,
				  struct btrfs_key *key, int slot)
{
	struct btrfs_file_extent_item *fi;
	u32 sectorsize = fs_info->sectorsize;
	u32 item_size = btrfs_item_size_nr(leaf, slot);

	if (!IS_ALIGNED(key->offset, sectorsize)) {
		file_extent_err(fs_info, leaf, slot,
"unaligned file_offset for file extent, have %llu should be aligned to %u",
			key->offset, sectorsize);
		return -EUCLEAN;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
		file_extent_err(fs_info, leaf, slot,
		"invalid type for file extent, have %u expect range [0, %u]",
			btrfs_file_extent_type(leaf, fi),
			BTRFS_FILE_EXTENT_TYPES);
		return -EUCLEAN;
	}

	/*
	 * Support for new compression/encryption must introduce incompat flag,
	 * and must be caught in open_ctree().
	 */
	if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
		file_extent_err(fs_info, leaf, slot,
	"invalid compression for file extent, have %u expect range [0, %u]",
			btrfs_file_extent_compression(leaf, fi),
			BTRFS_COMPRESS_TYPES);
		return -EUCLEAN;
	}
	if (btrfs_file_extent_encryption(leaf, fi)) {
		file_extent_err(fs_info, leaf, slot,
			"invalid encryption for file extent, have %u expect 0",
			btrfs_file_extent_encryption(leaf, fi));
		return -EUCLEAN;
	}
	if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
		/* Inline extent must have 0 as key offset */
		if (key->offset) {
			file_extent_err(fs_info, leaf, slot,
		"invalid file_offset for inline file extent, have %llu expect 0",
				key->offset);
			return -EUCLEAN;
		}

		/* Compressed inline extent has no on-disk size, skip it */
		if (btrfs_file_extent_compression(leaf, fi) !=
		    BTRFS_COMPRESS_NONE)
			return 0;

		/* Uncompressed inline extent size must match item size */
		if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
		    btrfs_file_extent_ram_bytes(leaf, fi)) {
			file_extent_err(fs_info, leaf, slot,
	"invalid ram_bytes for uncompressed inline extent, have %u expect %llu",
				item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START +
				btrfs_file_extent_ram_bytes(leaf, fi));
			return -EUCLEAN;
		}
		return 0;
	}

	/* Regular or preallocated extent has fixed item size */
	if (item_size != sizeof(*fi)) {
		file_extent_err(fs_info, leaf, slot,
	"invalid item size for reg/prealloc file extent, have %u expect %zu",
			item_size, sizeof(*fi));
		return -EUCLEAN;
	}
	if (CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, ram_bytes, sectorsize) ||
	    CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, disk_bytenr, sectorsize) ||
	    CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, disk_num_bytes, sectorsize) ||
	    CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, offset, sectorsize) ||
	    CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, num_bytes, sectorsize))
		return -EUCLEAN;
	return 0;
}

static int check_csum_item(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *leaf, struct btrfs_key *key,
			   int slot)
{
	u32 sectorsize = fs_info->sectorsize;
	u32 csumsize = btrfs_super_csum_size(fs_info->super_copy);

	if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
		generic_err(fs_info, leaf, slot,
		"invalid key objectid for csum item, have %llu expect %llu",
			key->objectid, BTRFS_EXTENT_CSUM_OBJECTID);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(key->offset, sectorsize)) {
		generic_err(fs_info, leaf, slot,
	"unaligned key offset for csum item, have %llu should be aligned to %u",
			key->offset, sectorsize);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
		generic_err(fs_info, leaf, slot,
	"unaligned item size for csum item, have %u should be aligned to %u",
			btrfs_item_size_nr(leaf, slot), csumsize);
		return -EUCLEAN;
	}
	return 0;
}

/*
 * Customized reporter for dir_item, the only important new info is
 * key->objectid, which represents the inode number.
 */
__printf(4, 5)
__cold
static void dir_item_err(const struct btrfs_fs_info *fs_info,
			 const struct extent_buffer *eb, int slot,
			 const char *fmt, ...)
{
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
		   "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
		   btrfs_header_level(eb) == 0 ? "leaf" : "node",
		   btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		   key.objectid, &vaf);
	va_end(args);
}
static int check_dir_item(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *leaf,
			  struct btrfs_key *key, int slot)
{
	struct btrfs_dir_item *di;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u32 cur = 0;

	di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
	while (cur < item_size) {
		u32 name_len;
		u32 data_len;
		u32 max_name_len;
		u32 total_size;
		u32 name_hash;
		u8 dir_type;

		/* header itself should not cross item boundary */
		if (cur + sizeof(*di) > item_size) {
			dir_item_err(fs_info, leaf, slot,
		"dir item header crosses item boundary, have %zu boundary %u",
				cur + sizeof(*di), item_size);
			return -EUCLEAN;
		}

		/* dir type check */
		dir_type = btrfs_dir_type(leaf, di);
		if (dir_type >= BTRFS_FT_MAX) {
			dir_item_err(fs_info, leaf, slot,
			"invalid dir item type, have %u expect [0, %u)",
				dir_type, BTRFS_FT_MAX);
			return -EUCLEAN;
		}

		if (key->type == BTRFS_XATTR_ITEM_KEY &&
		    dir_type != BTRFS_FT_XATTR) {
			dir_item_err(fs_info, leaf, slot,
		"invalid dir item type for XATTR key, have %u expect %u",
				dir_type, BTRFS_FT_XATTR);
			return -EUCLEAN;
		}
		if (dir_type == BTRFS_FT_XATTR &&
		    key->type != BTRFS_XATTR_ITEM_KEY) {
			dir_item_err(fs_info, leaf, slot,
			"xattr dir type found for non-XATTR key");
			return -EUCLEAN;
		}
		if (dir_type == BTRFS_FT_XATTR)
			max_name_len = XATTR_NAME_MAX;
		else
			max_name_len = BTRFS_NAME_LEN;

		/* Name/data length check */
		name_len = btrfs_dir_name_len(leaf, di);
		data_len = btrfs_dir_data_len(leaf, di);
		if (name_len > max_name_len) {
			dir_item_err(fs_info, leaf, slot,
			"dir item name len too long, have %u max %u",
				name_len, max_name_len);
			return -EUCLEAN;
		}
		if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info)) {
			dir_item_err(fs_info, leaf, slot,
			"dir item name and data len too long, have %u max %u",
				name_len + data_len,
				BTRFS_MAX_XATTR_SIZE(fs_info));
			return -EUCLEAN;
		}

		if (data_len && dir_type != BTRFS_FT_XATTR) {
			dir_item_err(fs_info, leaf, slot,
			"dir item with invalid data len, have %u expect 0",
				data_len);
			return -EUCLEAN;
		}

		total_size = sizeof(*di) + name_len + data_len;

		/* header and name/data should not cross item boundary */
		if (cur + total_size > item_size) {
			dir_item_err(fs_info, leaf, slot,
		"dir item data crosses item boundary, have %u boundary %u",
				cur + total_size, item_size);
			return -EUCLEAN;
		}

		/*
		 * Special check for XATTR/DIR_ITEM, as key->offset is name
		 * hash, should match its name
		 */
		if (key->type == BTRFS_DIR_ITEM_KEY ||
		    key->type == BTRFS_XATTR_ITEM_KEY) {
			char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];

			read_extent_buffer(leaf, namebuf,
					(unsigned long)(di + 1), name_len);
			name_hash = btrfs_name_hash(namebuf, name_len);
			if (key->offset != name_hash) {
				dir_item_err(fs_info, leaf, slot,
		"name hash mismatch with key, have 0x%016x expect 0x%016llx",
					name_hash, key->offset);
				return -EUCLEAN;
			}
		}
		cur += total_size;
		di = (struct btrfs_dir_item *)((void *)di + total_size);
	}
	return 0;
}

__printf(4, 5)
__cold
static void block_group_err(const struct btrfs_fs_info *fs_info,
			    const struct extent_buffer *eb, int slot,
			    const char *fmt, ...)
{
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
		   btrfs_header_level(eb) == 0 ? "leaf" : "node",
		   btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		   key.objectid, key.offset, &vaf);
	va_end(args);
}

static int check_block_group_item(struct btrfs_fs_info *fs_info,
				  struct extent_buffer *leaf,
				  struct btrfs_key *key, int slot)
{
	struct btrfs_block_group_item bgi;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u64 flags;
	u64 type;

	/*
	 * Here we don't really care about alignment since extent allocator can
	 * handle it.  We care more about the size.
	 */
	if (key->offset == 0) {
		block_group_err(fs_info, leaf, slot,
				"invalid block group size 0");
		return -EUCLEAN;
	}

	if (item_size != sizeof(bgi)) {
		block_group_err(fs_info, leaf, slot,
			"invalid item size, have %u expect %zu",
				item_size, sizeof(bgi));
		return -EUCLEAN;
	}

	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(bgi));
	if (btrfs_block_group_chunk_objectid(&bgi) !=
	    BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
		block_group_err(fs_info, leaf, slot,
		"invalid block group chunk objectid, have %llu expect %llu",
				btrfs_block_group_chunk_objectid(&bgi),
				BTRFS_FIRST_CHUNK_TREE_OBJECTID);
		return -EUCLEAN;
	}

	if (btrfs_block_group_used(&bgi) > key->offset) {
		block_group_err(fs_info, leaf, slot,
			"invalid block group used, have %llu expect [0, %llu)",
				btrfs_block_group_used(&bgi), key->offset);
		return -EUCLEAN;
	}

	flags = btrfs_block_group_flags(&bgi);
	if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
		block_group_err(fs_info, leaf, slot,
"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
			flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
			hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
		return -EUCLEAN;
	}

	type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	if (type != BTRFS_BLOCK_GROUP_DATA &&
	    type != BTRFS_BLOCK_GROUP_METADATA &&
	    type != BTRFS_BLOCK_GROUP_SYSTEM &&
	    type != (BTRFS_BLOCK_GROUP_METADATA |
		     BTRFS_BLOCK_GROUP_DATA)) {
		block_group_err(fs_info, leaf, slot,
"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
			type, hweight64(type),
			BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
			BTRFS_BLOCK_GROUP_SYSTEM,
			BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
		return -EUCLEAN;
	}
	return 0;
}

/*
 * Common point to switch the item-specific validation.
 */
static int check_leaf_item(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *leaf,
			   struct btrfs_key *key, int slot)
{
	int ret = 0;

	switch (key->type) {
	case BTRFS_EXTENT_DATA_KEY:
		ret = check_extent_data_item(fs_info, leaf, key, slot);
		break;
	case BTRFS_EXTENT_CSUM_KEY:
		ret = check_csum_item(fs_info, leaf, key, slot);
		break;
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
	case BTRFS_XATTR_ITEM_KEY:
		ret = check_dir_item(fs_info, leaf, key, slot);
		break;
	case BTRFS_BLOCK_GROUP_ITEM_KEY:
		ret = check_block_group_item(fs_info, leaf, key, slot);
		break;
	}
	return ret;
}

static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
		      bool check_item_data)
{
	/* No valid key type is 0, so all keys should be larger than this key */
	struct btrfs_key prev_key = {0, 0, 0};
	struct btrfs_key key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	if (btrfs_header_level(leaf) != 0) {
		generic_err(fs_info, leaf, 0,
			"invalid level for leaf, have %d expect 0",
			btrfs_header_level(leaf));
		return -EUCLEAN;
	}

	/*
	 * Extent buffers from a relocation tree have an owner field that
	 * corresponds to the subvolume tree they are based on.  So just from
	 * an extent buffer alone we can not find out what is the id of the
	 * corresponding subvolume tree, so we can not figure out if the extent
	 * buffer corresponds to the root of the relocation tree or not.  So
	 * skip this check for relocation trees.
	 */
	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
		u64 owner = btrfs_header_owner(leaf);
		struct btrfs_root *check_root;

		/* These trees must never be empty */
		if (owner == BTRFS_ROOT_TREE_OBJECTID ||
		    owner == BTRFS_CHUNK_TREE_OBJECTID ||
		    owner == BTRFS_EXTENT_TREE_OBJECTID ||
		    owner == BTRFS_DEV_TREE_OBJECTID ||
		    owner == BTRFS_FS_TREE_OBJECTID ||
		    owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
			generic_err(fs_info, leaf, 0,
			"invalid root, root %llu must never be empty",
				    owner);
			return -EUCLEAN;
		}
		key.objectid = owner;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		check_root = btrfs_get_fs_root(fs_info, &key, false);
		/*
		 * The only reason we also check NULL here is that during
		 * open_ctree() some roots have not yet been set up.
		 */
		if (!IS_ERR_OR_NULL(check_root)) {
			struct extent_buffer *eb;

			eb = btrfs_root_node(check_root);
			/* if leaf is the root, then it's fine */
			if (leaf != eb) {
				generic_err(fs_info, leaf, 0,
		"invalid nritems, have %u should not be 0 for non-root leaf",
					nritems);
				free_extent_buffer(eb);
				return -EUCLEAN;
			}
			free_extent_buffer(eb);
		}
		return 0;
	}

	if (nritems == 0)
		return 0;

	/*
	 * Check the following things to make sure this is a good leaf, and
	 * leaf users won't need to bother with similar sanity checks:
	 *
	 * 1) key ordering
	 * 2) item offset and size
	 *    No overlap, no hole, all inside the leaf.
	 * 3) item content
	 *    If possible, do comprehensive sanity check.
	 *    NOTE: All checks must only rely on the item data itself.
	 */
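	/*
	 * Added sketch (not part of the original source) of the leaf layout
	 * the offset/size checks below rely on: the btrfs_item array grows
	 * forward from the header while the item data grows backward from
	 * the end of the leaf, so item N's data must end exactly where item
	 * N-1's data starts:
	 *
	 *   | header | item 0 | item 1 | ... free space ... | data 1 | data 0 |
	 */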
	for (slot = 0; slot < nritems; slot++) {
		u32 item_end_expected;
		int ret;

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
			generic_err(fs_info, leaf, slot,
	"bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
				prev_key.objectid, prev_key.type,
				prev_key.offset, key.objectid, key.type,
				key.offset);
			return -EUCLEAN;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards
		 * the front.
		 */
		if (slot == 0)
			item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info);
		else
			item_end_expected = btrfs_item_offset_nr(leaf,
								 slot - 1);
		if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
			generic_err(fs_info, leaf, slot,
				"unexpected item end, have %u expect %u",
				btrfs_item_end_nr(leaf, slot),
				item_end_expected);
			return -EUCLEAN;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent with each other,
		 * but all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(fs_info)) {
			generic_err(fs_info, leaf, slot,
			"slot end outside of leaf, have %u expect range [0, %u]",
				btrfs_item_end_nr(leaf, slot),
				BTRFS_LEAF_DATA_SIZE(fs_info));
			return -EUCLEAN;
		}

		/* Also check if the item pointer overlaps with btrfs item. */
		if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
		    btrfs_item_ptr_offset(leaf, slot)) {
			generic_err(fs_info, leaf, slot,
		"slot overlaps with its data, item end %lu data start %lu",
				btrfs_item_nr_offset(slot) +
				sizeof(struct btrfs_item),
				btrfs_item_ptr_offset(leaf, slot));
			return -EUCLEAN;
		}

		if (check_item_data) {
			/*
			 * Check if the item size and content meet other
			 * criteria
			 */
			ret = check_leaf_item(fs_info, leaf, &key, slot);
			if (ret < 0)
				return ret;
		}

		prev_key.objectid = key.objectid;
		prev_key.type = key.type;
		prev_key.offset = key.offset;
	}

	return 0;
}

int btrfs_check_leaf_full(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *leaf)
{
	return check_leaf(fs_info, leaf, true);
}

int btrfs_check_leaf_relaxed(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *leaf)
{
	return check_leaf(fs_info, leaf, false);
}

int btrfs_check_node(struct btrfs_fs_info *fs_info, struct extent_buffer *node)
{
	unsigned long nr = btrfs_header_nritems(node);
	struct btrfs_key key, next_key;
	int slot;
	int level = btrfs_header_level(node);
	u64 bytenr;
	int ret = 0;

	if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
		generic_err(fs_info, node, 0,
			"invalid level for node, have %d expect [1, %d]",
			level, BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}
	if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) {
		btrfs_crit(fs_info,
"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
			   btrfs_header_owner(node), node->start,
			   nr == 0 ? "small" : "large", nr,
			   BTRFS_NODEPTRS_PER_BLOCK(fs_info));
		return -EUCLEAN;
	}

	for (slot = 0; slot < nr - 1; slot++) {
		bytenr = btrfs_node_blockptr(node, slot);
		btrfs_node_key_to_cpu(node, &key, slot);
		btrfs_node_key_to_cpu(node, &next_key, slot + 1);

		if (!bytenr) {
			generic_err(fs_info, node, slot,
				"invalid NULL node pointer");
			ret = -EUCLEAN;
			goto out;
		}
		if (!IS_ALIGNED(bytenr, fs_info->sectorsize)) {
			generic_err(fs_info, node, slot,
			"unaligned pointer, have %llu should be aligned to %u",
				bytenr, fs_info->sectorsize);
			ret = -EUCLEAN;
			goto out;
		}

		if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
			generic_err(fs_info, node, slot,
			"bad key order, current (%llu %u %llu) next (%llu %u %llu)",
				key.objectid, key.type, key.offset,
				next_key.objectid, next_key.type,
				next_key.offset);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	return ret;
}