1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) Qu Wenruo 2017. All rights reserved. 4 */ 5 6 /* 7 * The module is used to catch unexpected/corrupted tree block data. 8 * Such behavior can be caused either by a fuzzed image or bugs. 9 * 10 * The objective is to do leaf/node validation checks when tree block is read 11 * from disk, and check *every* possible member, so other code won't 12 * need to checking them again. 13 * 14 * Due to the potential and unwanted damage, every checker needs to be 15 * carefully reviewed otherwise so it does not prevent mount of valid images. 16 */ 17 18 #include <linux/types.h> 19 #include <linux/stddef.h> 20 #include <linux/error-injection.h> 21 #include "ctree.h" 22 #include "tree-checker.h" 23 #include "disk-io.h" 24 #include "compression.h" 25 #include "volumes.h" 26 #include "misc.h" 27 28 /* 29 * Error message should follow the following format: 30 * corrupt <type>: <identifier>, <reason>[, <bad_value>] 31 * 32 * @type: leaf or node 33 * @identifier: the necessary info to locate the leaf/node. 34 * It's recommended to decode key.objecitd/offset if it's 35 * meaningful. 36 * @reason: describe the error 37 * @bad_value: optional, it's recommended to output bad value and its 38 * expected value (range). 39 * 40 * Since comma is used to separate the components, only space is allowed 41 * inside each component. 42 */ 43 44 /* 45 * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt. 46 * Allows callers to customize the output. 47 */ 48 __printf(3, 4) 49 __cold 50 static void generic_err(const struct extent_buffer *eb, int slot, 51 const char *fmt, ...) 52 { 53 const struct btrfs_fs_info *fs_info = eb->fs_info; 54 struct va_format vaf; 55 va_list args; 56 57 va_start(args, fmt); 58 59 vaf.fmt = fmt; 60 vaf.va = &args; 61 62 btrfs_crit(fs_info, 63 "corrupt %s: root=%llu block=%llu slot=%d, %pV", 64 btrfs_header_level(eb) == 0 ? "leaf" : "node", 65 btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, &vaf); 66 va_end(args); 67 } 68 69 /* 70 * Customized reporter for extent data item, since its key objectid and 71 * offset has its own meaning. 72 */ 73 __printf(3, 4) 74 __cold 75 static void file_extent_err(const struct extent_buffer *eb, int slot, 76 const char *fmt, ...) 77 { 78 const struct btrfs_fs_info *fs_info = eb->fs_info; 79 struct btrfs_key key; 80 struct va_format vaf; 81 va_list args; 82 83 btrfs_item_key_to_cpu(eb, &key, slot); 84 va_start(args, fmt); 85 86 vaf.fmt = fmt; 87 vaf.va = &args; 88 89 btrfs_crit(fs_info, 90 "corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV", 91 btrfs_header_level(eb) == 0 ? "leaf" : "node", 92 btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, 93 key.objectid, key.offset, &vaf); 94 va_end(args); 95 } 96 97 /* 98 * Return 0 if the btrfs_file_extent_##name is aligned to @alignment 99 * Else return 1 100 */ 101 #define CHECK_FE_ALIGNED(leaf, slot, fi, name, alignment) \ 102 ({ \ 103 if (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))) \ 104 file_extent_err((leaf), (slot), \ 105 "invalid %s for file extent, have %llu, should be aligned to %u", \ 106 (#name), btrfs_file_extent_##name((leaf), (fi)), \ 107 (alignment)); \ 108 (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))); \ 109 }) 110 111 static u64 file_extent_end(struct extent_buffer *leaf, 112 struct btrfs_key *key, 113 struct btrfs_file_extent_item *extent) 114 { 115 u64 end; 116 u64 len; 117 118 if (btrfs_file_extent_type(leaf, extent) == BTRFS_FILE_EXTENT_INLINE) { 119 len = btrfs_file_extent_ram_bytes(leaf, extent); 120 end = ALIGN(key->offset + len, leaf->fs_info->sectorsize); 121 } else { 122 len = btrfs_file_extent_num_bytes(leaf, extent); 123 end = key->offset + len; 124 } 125 return end; 126 } 127 128 /* 129 * Customized report for dir_item, the only new important information is 130 * key->objectid, which represents inode number 131 */ 132 __printf(3, 4) 133 __cold 134 static void dir_item_err(const struct extent_buffer *eb, int slot, 135 const char *fmt, ...) 136 { 137 const struct btrfs_fs_info *fs_info = eb->fs_info; 138 struct btrfs_key key; 139 struct va_format vaf; 140 va_list args; 141 142 btrfs_item_key_to_cpu(eb, &key, slot); 143 va_start(args, fmt); 144 145 vaf.fmt = fmt; 146 vaf.va = &args; 147 148 btrfs_crit(fs_info, 149 "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV", 150 btrfs_header_level(eb) == 0 ? "leaf" : "node", 151 btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, 152 key.objectid, &vaf); 153 va_end(args); 154 } 155 156 /* 157 * This functions checks prev_key->objectid, to ensure current key and prev_key 158 * share the same objectid as inode number. 159 * 160 * This is to detect missing INODE_ITEM in subvolume trees. 161 * 162 * Return true if everything is OK or we don't need to check. 163 * Return false if anything is wrong. 164 */ 165 static bool check_prev_ino(struct extent_buffer *leaf, 166 struct btrfs_key *key, int slot, 167 struct btrfs_key *prev_key) 168 { 169 /* No prev key, skip check */ 170 if (slot == 0) 171 return true; 172 173 /* Only these key->types needs to be checked */ 174 ASSERT(key->type == BTRFS_XATTR_ITEM_KEY || 175 key->type == BTRFS_INODE_REF_KEY || 176 key->type == BTRFS_DIR_INDEX_KEY || 177 key->type == BTRFS_DIR_ITEM_KEY || 178 key->type == BTRFS_EXTENT_DATA_KEY); 179 180 /* 181 * Only subvolume trees along with their reloc trees need this check. 182 * Things like log tree doesn't follow this ino requirement. 183 */ 184 if (!is_fstree(btrfs_header_owner(leaf))) 185 return true; 186 187 if (key->objectid == prev_key->objectid) 188 return true; 189 190 /* Error found */ 191 dir_item_err(leaf, slot, 192 "invalid previous key objectid, have %llu expect %llu", 193 prev_key->objectid, key->objectid); 194 return false; 195 } 196 static int check_extent_data_item(struct extent_buffer *leaf, 197 struct btrfs_key *key, int slot, 198 struct btrfs_key *prev_key) 199 { 200 struct btrfs_fs_info *fs_info = leaf->fs_info; 201 struct btrfs_file_extent_item *fi; 202 u32 sectorsize = fs_info->sectorsize; 203 u32 item_size = btrfs_item_size_nr(leaf, slot); 204 u64 extent_end; 205 206 if (!IS_ALIGNED(key->offset, sectorsize)) { 207 file_extent_err(leaf, slot, 208 "unaligned file_offset for file extent, have %llu should be aligned to %u", 209 key->offset, sectorsize); 210 return -EUCLEAN; 211 } 212 213 /* 214 * Previous key must have the same key->objectid (ino). 215 * It can be XATTR_ITEM, INODE_ITEM or just another EXTENT_DATA. 216 * But if objectids mismatch, it means we have a missing 217 * INODE_ITEM. 218 */ 219 if (!check_prev_ino(leaf, key, slot, prev_key)) 220 return -EUCLEAN; 221 222 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 223 224 /* 225 * Make sure the item contains at least inline header, so the file 226 * extent type is not some garbage. 227 */ 228 if (item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START) { 229 file_extent_err(leaf, slot, 230 "invalid item size, have %u expect [%lu, %u)", 231 item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START, 232 SZ_4K); 233 return -EUCLEAN; 234 } 235 if (btrfs_file_extent_type(leaf, fi) >= BTRFS_NR_FILE_EXTENT_TYPES) { 236 file_extent_err(leaf, slot, 237 "invalid type for file extent, have %u expect range [0, %u]", 238 btrfs_file_extent_type(leaf, fi), 239 BTRFS_NR_FILE_EXTENT_TYPES - 1); 240 return -EUCLEAN; 241 } 242 243 /* 244 * Support for new compression/encryption must introduce incompat flag, 245 * and must be caught in open_ctree(). 246 */ 247 if (btrfs_file_extent_compression(leaf, fi) >= BTRFS_NR_COMPRESS_TYPES) { 248 file_extent_err(leaf, slot, 249 "invalid compression for file extent, have %u expect range [0, %u]", 250 btrfs_file_extent_compression(leaf, fi), 251 BTRFS_NR_COMPRESS_TYPES - 1); 252 return -EUCLEAN; 253 } 254 if (btrfs_file_extent_encryption(leaf, fi)) { 255 file_extent_err(leaf, slot, 256 "invalid encryption for file extent, have %u expect 0", 257 btrfs_file_extent_encryption(leaf, fi)); 258 return -EUCLEAN; 259 } 260 if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) { 261 /* Inline extent must have 0 as key offset */ 262 if (key->offset) { 263 file_extent_err(leaf, slot, 264 "invalid file_offset for inline file extent, have %llu expect 0", 265 key->offset); 266 return -EUCLEAN; 267 } 268 269 /* Compressed inline extent has no on-disk size, skip it */ 270 if (btrfs_file_extent_compression(leaf, fi) != 271 BTRFS_COMPRESS_NONE) 272 return 0; 273 274 /* Uncompressed inline extent size must match item size */ 275 if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START + 276 btrfs_file_extent_ram_bytes(leaf, fi)) { 277 file_extent_err(leaf, slot, 278 "invalid ram_bytes for uncompressed inline extent, have %u expect %llu", 279 item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START + 280 btrfs_file_extent_ram_bytes(leaf, fi)); 281 return -EUCLEAN; 282 } 283 return 0; 284 } 285 286 /* Regular or preallocated extent has fixed item size */ 287 if (item_size != sizeof(*fi)) { 288 file_extent_err(leaf, slot, 289 "invalid item size for reg/prealloc file extent, have %u expect %zu", 290 item_size, sizeof(*fi)); 291 return -EUCLEAN; 292 } 293 if (CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, sectorsize) || 294 CHECK_FE_ALIGNED(leaf, slot, fi, disk_bytenr, sectorsize) || 295 CHECK_FE_ALIGNED(leaf, slot, fi, disk_num_bytes, sectorsize) || 296 CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) || 297 CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize)) 298 return -EUCLEAN; 299 300 /* Catch extent end overflow */ 301 if (check_add_overflow(btrfs_file_extent_num_bytes(leaf, fi), 302 key->offset, &extent_end)) { 303 file_extent_err(leaf, slot, 304 "extent end overflow, have file offset %llu extent num bytes %llu", 305 key->offset, 306 btrfs_file_extent_num_bytes(leaf, fi)); 307 return -EUCLEAN; 308 } 309 310 /* 311 * Check that no two consecutive file extent items, in the same leaf, 312 * present ranges that overlap each other. 313 */ 314 if (slot > 0 && 315 prev_key->objectid == key->objectid && 316 prev_key->type == BTRFS_EXTENT_DATA_KEY) { 317 struct btrfs_file_extent_item *prev_fi; 318 u64 prev_end; 319 320 prev_fi = btrfs_item_ptr(leaf, slot - 1, 321 struct btrfs_file_extent_item); 322 prev_end = file_extent_end(leaf, prev_key, prev_fi); 323 if (prev_end > key->offset) { 324 file_extent_err(leaf, slot - 1, 325 "file extent end range (%llu) goes beyond start offset (%llu) of the next file extent", 326 prev_end, key->offset); 327 return -EUCLEAN; 328 } 329 } 330 331 return 0; 332 } 333 334 static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key, 335 int slot) 336 { 337 struct btrfs_fs_info *fs_info = leaf->fs_info; 338 u32 sectorsize = fs_info->sectorsize; 339 u32 csumsize = btrfs_super_csum_size(fs_info->super_copy); 340 341 if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) { 342 generic_err(leaf, slot, 343 "invalid key objectid for csum item, have %llu expect %llu", 344 key->objectid, BTRFS_EXTENT_CSUM_OBJECTID); 345 return -EUCLEAN; 346 } 347 if (!IS_ALIGNED(key->offset, sectorsize)) { 348 generic_err(leaf, slot, 349 "unaligned key offset for csum item, have %llu should be aligned to %u", 350 key->offset, sectorsize); 351 return -EUCLEAN; 352 } 353 if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) { 354 generic_err(leaf, slot, 355 "unaligned item size for csum item, have %u should be aligned to %u", 356 btrfs_item_size_nr(leaf, slot), csumsize); 357 return -EUCLEAN; 358 } 359 return 0; 360 } 361 362 static int check_dir_item(struct extent_buffer *leaf, 363 struct btrfs_key *key, struct btrfs_key *prev_key, 364 int slot) 365 { 366 struct btrfs_fs_info *fs_info = leaf->fs_info; 367 struct btrfs_dir_item *di; 368 u32 item_size = btrfs_item_size_nr(leaf, slot); 369 u32 cur = 0; 370 371 if (!check_prev_ino(leaf, key, slot, prev_key)) 372 return -EUCLEAN; 373 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); 374 while (cur < item_size) { 375 u32 name_len; 376 u32 data_len; 377 u32 max_name_len; 378 u32 total_size; 379 u32 name_hash; 380 u8 dir_type; 381 382 /* header itself should not cross item boundary */ 383 if (cur + sizeof(*di) > item_size) { 384 dir_item_err(leaf, slot, 385 "dir item header crosses item boundary, have %zu boundary %u", 386 cur + sizeof(*di), item_size); 387 return -EUCLEAN; 388 } 389 390 /* dir type check */ 391 dir_type = btrfs_dir_type(leaf, di); 392 if (dir_type >= BTRFS_FT_MAX) { 393 dir_item_err(leaf, slot, 394 "invalid dir item type, have %u expect [0, %u)", 395 dir_type, BTRFS_FT_MAX); 396 return -EUCLEAN; 397 } 398 399 if (key->type == BTRFS_XATTR_ITEM_KEY && 400 dir_type != BTRFS_FT_XATTR) { 401 dir_item_err(leaf, slot, 402 "invalid dir item type for XATTR key, have %u expect %u", 403 dir_type, BTRFS_FT_XATTR); 404 return -EUCLEAN; 405 } 406 if (dir_type == BTRFS_FT_XATTR && 407 key->type != BTRFS_XATTR_ITEM_KEY) { 408 dir_item_err(leaf, slot, 409 "xattr dir type found for non-XATTR key"); 410 return -EUCLEAN; 411 } 412 if (dir_type == BTRFS_FT_XATTR) 413 max_name_len = XATTR_NAME_MAX; 414 else 415 max_name_len = BTRFS_NAME_LEN; 416 417 /* Name/data length check */ 418 name_len = btrfs_dir_name_len(leaf, di); 419 data_len = btrfs_dir_data_len(leaf, di); 420 if (name_len > max_name_len) { 421 dir_item_err(leaf, slot, 422 "dir item name len too long, have %u max %u", 423 name_len, max_name_len); 424 return -EUCLEAN; 425 } 426 if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info)) { 427 dir_item_err(leaf, slot, 428 "dir item name and data len too long, have %u max %u", 429 name_len + data_len, 430 BTRFS_MAX_XATTR_SIZE(fs_info)); 431 return -EUCLEAN; 432 } 433 434 if (data_len && dir_type != BTRFS_FT_XATTR) { 435 dir_item_err(leaf, slot, 436 "dir item with invalid data len, have %u expect 0", 437 data_len); 438 return -EUCLEAN; 439 } 440 441 total_size = sizeof(*di) + name_len + data_len; 442 443 /* header and name/data should not cross item boundary */ 444 if (cur + total_size > item_size) { 445 dir_item_err(leaf, slot, 446 "dir item data crosses item boundary, have %u boundary %u", 447 cur + total_size, item_size); 448 return -EUCLEAN; 449 } 450 451 /* 452 * Special check for XATTR/DIR_ITEM, as key->offset is name 453 * hash, should match its name 454 */ 455 if (key->type == BTRFS_DIR_ITEM_KEY || 456 key->type == BTRFS_XATTR_ITEM_KEY) { 457 char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)]; 458 459 read_extent_buffer(leaf, namebuf, 460 (unsigned long)(di + 1), name_len); 461 name_hash = btrfs_name_hash(namebuf, name_len); 462 if (key->offset != name_hash) { 463 dir_item_err(leaf, slot, 464 "name hash mismatch with key, have 0x%016x expect 0x%016llx", 465 name_hash, key->offset); 466 return -EUCLEAN; 467 } 468 } 469 cur += total_size; 470 di = (struct btrfs_dir_item *)((void *)di + total_size); 471 } 472 return 0; 473 } 474 475 __printf(3, 4) 476 __cold 477 static void block_group_err(const struct extent_buffer *eb, int slot, 478 const char *fmt, ...) 479 { 480 const struct btrfs_fs_info *fs_info = eb->fs_info; 481 struct btrfs_key key; 482 struct va_format vaf; 483 va_list args; 484 485 btrfs_item_key_to_cpu(eb, &key, slot); 486 va_start(args, fmt); 487 488 vaf.fmt = fmt; 489 vaf.va = &args; 490 491 btrfs_crit(fs_info, 492 "corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV", 493 btrfs_header_level(eb) == 0 ? "leaf" : "node", 494 btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, 495 key.objectid, key.offset, &vaf); 496 va_end(args); 497 } 498 499 static int check_block_group_item(struct extent_buffer *leaf, 500 struct btrfs_key *key, int slot) 501 { 502 struct btrfs_block_group_item bgi; 503 u32 item_size = btrfs_item_size_nr(leaf, slot); 504 u64 flags; 505 u64 type; 506 507 /* 508 * Here we don't really care about alignment since extent allocator can 509 * handle it. We care more about the size. 510 */ 511 if (key->offset == 0) { 512 block_group_err(leaf, slot, 513 "invalid block group size 0"); 514 return -EUCLEAN; 515 } 516 517 if (item_size != sizeof(bgi)) { 518 block_group_err(leaf, slot, 519 "invalid item size, have %u expect %zu", 520 item_size, sizeof(bgi)); 521 return -EUCLEAN; 522 } 523 524 read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 525 sizeof(bgi)); 526 if (btrfs_stack_block_group_chunk_objectid(&bgi) != 527 BTRFS_FIRST_CHUNK_TREE_OBJECTID) { 528 block_group_err(leaf, slot, 529 "invalid block group chunk objectid, have %llu expect %llu", 530 btrfs_stack_block_group_chunk_objectid(&bgi), 531 BTRFS_FIRST_CHUNK_TREE_OBJECTID); 532 return -EUCLEAN; 533 } 534 535 if (btrfs_stack_block_group_used(&bgi) > key->offset) { 536 block_group_err(leaf, slot, 537 "invalid block group used, have %llu expect [0, %llu)", 538 btrfs_stack_block_group_used(&bgi), key->offset); 539 return -EUCLEAN; 540 } 541 542 flags = btrfs_stack_block_group_flags(&bgi); 543 if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) { 544 block_group_err(leaf, slot, 545 "invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set", 546 flags & BTRFS_BLOCK_GROUP_PROFILE_MASK, 547 hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)); 548 return -EUCLEAN; 549 } 550 551 type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; 552 if (type != BTRFS_BLOCK_GROUP_DATA && 553 type != BTRFS_BLOCK_GROUP_METADATA && 554 type != BTRFS_BLOCK_GROUP_SYSTEM && 555 type != (BTRFS_BLOCK_GROUP_METADATA | 556 BTRFS_BLOCK_GROUP_DATA)) { 557 block_group_err(leaf, slot, 558 "invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx", 559 type, hweight64(type), 560 BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA, 561 BTRFS_BLOCK_GROUP_SYSTEM, 562 BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA); 563 return -EUCLEAN; 564 } 565 return 0; 566 } 567 568 __printf(4, 5) 569 __cold 570 static void chunk_err(const struct extent_buffer *leaf, 571 const struct btrfs_chunk *chunk, u64 logical, 572 const char *fmt, ...) 573 { 574 const struct btrfs_fs_info *fs_info = leaf->fs_info; 575 bool is_sb; 576 struct va_format vaf; 577 va_list args; 578 int i; 579 int slot = -1; 580 581 /* Only superblock eb is able to have such small offset */ 582 is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET); 583 584 if (!is_sb) { 585 /* 586 * Get the slot number by iterating through all slots, this 587 * would provide better readability. 588 */ 589 for (i = 0; i < btrfs_header_nritems(leaf); i++) { 590 if (btrfs_item_ptr_offset(leaf, i) == 591 (unsigned long)chunk) { 592 slot = i; 593 break; 594 } 595 } 596 } 597 va_start(args, fmt); 598 vaf.fmt = fmt; 599 vaf.va = &args; 600 601 if (is_sb) 602 btrfs_crit(fs_info, 603 "corrupt superblock syschunk array: chunk_start=%llu, %pV", 604 logical, &vaf); 605 else 606 btrfs_crit(fs_info, 607 "corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV", 608 BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot, 609 logical, &vaf); 610 va_end(args); 611 } 612 613 /* 614 * The common chunk check which could also work on super block sys chunk array. 615 * 616 * Return -EUCLEAN if anything is corrupted. 617 * Return 0 if everything is OK. 618 */ 619 int btrfs_check_chunk_valid(struct extent_buffer *leaf, 620 struct btrfs_chunk *chunk, u64 logical) 621 { 622 struct btrfs_fs_info *fs_info = leaf->fs_info; 623 u64 length; 624 u64 stripe_len; 625 u16 num_stripes; 626 u16 sub_stripes; 627 u64 type; 628 u64 features; 629 bool mixed = false; 630 631 length = btrfs_chunk_length(leaf, chunk); 632 stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 633 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 634 sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 635 type = btrfs_chunk_type(leaf, chunk); 636 637 if (!num_stripes) { 638 chunk_err(leaf, chunk, logical, 639 "invalid chunk num_stripes, have %u", num_stripes); 640 return -EUCLEAN; 641 } 642 if (!IS_ALIGNED(logical, fs_info->sectorsize)) { 643 chunk_err(leaf, chunk, logical, 644 "invalid chunk logical, have %llu should aligned to %u", 645 logical, fs_info->sectorsize); 646 return -EUCLEAN; 647 } 648 if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) { 649 chunk_err(leaf, chunk, logical, 650 "invalid chunk sectorsize, have %u expect %u", 651 btrfs_chunk_sector_size(leaf, chunk), 652 fs_info->sectorsize); 653 return -EUCLEAN; 654 } 655 if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) { 656 chunk_err(leaf, chunk, logical, 657 "invalid chunk length, have %llu", length); 658 return -EUCLEAN; 659 } 660 if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) { 661 chunk_err(leaf, chunk, logical, 662 "invalid chunk stripe length: %llu", 663 stripe_len); 664 return -EUCLEAN; 665 } 666 if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & 667 type) { 668 chunk_err(leaf, chunk, logical, 669 "unrecognized chunk type: 0x%llx", 670 ~(BTRFS_BLOCK_GROUP_TYPE_MASK | 671 BTRFS_BLOCK_GROUP_PROFILE_MASK) & 672 btrfs_chunk_type(leaf, chunk)); 673 return -EUCLEAN; 674 } 675 676 if (!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) && 677 (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) { 678 chunk_err(leaf, chunk, logical, 679 "invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set", 680 type & BTRFS_BLOCK_GROUP_PROFILE_MASK); 681 return -EUCLEAN; 682 } 683 if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) { 684 chunk_err(leaf, chunk, logical, 685 "missing chunk type flag, have 0x%llx one bit must be set in 0x%llx", 686 type, BTRFS_BLOCK_GROUP_TYPE_MASK); 687 return -EUCLEAN; 688 } 689 690 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) && 691 (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) { 692 chunk_err(leaf, chunk, logical, 693 "system chunk with data or metadata type: 0x%llx", 694 type); 695 return -EUCLEAN; 696 } 697 698 features = btrfs_super_incompat_flags(fs_info->super_copy); 699 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 700 mixed = true; 701 702 if (!mixed) { 703 if ((type & BTRFS_BLOCK_GROUP_METADATA) && 704 (type & BTRFS_BLOCK_GROUP_DATA)) { 705 chunk_err(leaf, chunk, logical, 706 "mixed chunk type in non-mixed mode: 0x%llx", type); 707 return -EUCLEAN; 708 } 709 } 710 711 if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) || 712 (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) || 713 (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) || 714 (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) || 715 (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) || 716 ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) { 717 chunk_err(leaf, chunk, logical, 718 "invalid num_stripes:sub_stripes %u:%u for profile %llu", 719 num_stripes, sub_stripes, 720 type & BTRFS_BLOCK_GROUP_PROFILE_MASK); 721 return -EUCLEAN; 722 } 723 724 return 0; 725 } 726 727 __printf(3, 4) 728 __cold 729 static void dev_item_err(const struct extent_buffer *eb, int slot, 730 const char *fmt, ...) 731 { 732 struct btrfs_key key; 733 struct va_format vaf; 734 va_list args; 735 736 btrfs_item_key_to_cpu(eb, &key, slot); 737 va_start(args, fmt); 738 739 vaf.fmt = fmt; 740 vaf.va = &args; 741 742 btrfs_crit(eb->fs_info, 743 "corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV", 744 btrfs_header_level(eb) == 0 ? "leaf" : "node", 745 btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, 746 key.objectid, &vaf); 747 va_end(args); 748 } 749 750 static int check_dev_item(struct extent_buffer *leaf, 751 struct btrfs_key *key, int slot) 752 { 753 struct btrfs_dev_item *ditem; 754 755 if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) { 756 dev_item_err(leaf, slot, 757 "invalid objectid: has=%llu expect=%llu", 758 key->objectid, BTRFS_DEV_ITEMS_OBJECTID); 759 return -EUCLEAN; 760 } 761 ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); 762 if (btrfs_device_id(leaf, ditem) != key->offset) { 763 dev_item_err(leaf, slot, 764 "devid mismatch: key has=%llu item has=%llu", 765 key->offset, btrfs_device_id(leaf, ditem)); 766 return -EUCLEAN; 767 } 768 769 /* 770 * For device total_bytes, we don't have reliable way to check it, as 771 * it can be 0 for device removal. Device size check can only be done 772 * by dev extents check. 773 */ 774 if (btrfs_device_bytes_used(leaf, ditem) > 775 btrfs_device_total_bytes(leaf, ditem)) { 776 dev_item_err(leaf, slot, 777 "invalid bytes used: have %llu expect [0, %llu]", 778 btrfs_device_bytes_used(leaf, ditem), 779 btrfs_device_total_bytes(leaf, ditem)); 780 return -EUCLEAN; 781 } 782 /* 783 * Remaining members like io_align/type/gen/dev_group aren't really 784 * utilized. Skip them to make later usage of them easier. 785 */ 786 return 0; 787 } 788 789 /* Inode item error output has the same format as dir_item_err() */ 790 #define inode_item_err(fs_info, eb, slot, fmt, ...) \ 791 dir_item_err(eb, slot, fmt, __VA_ARGS__) 792 793 static int check_inode_item(struct extent_buffer *leaf, 794 struct btrfs_key *key, int slot) 795 { 796 struct btrfs_fs_info *fs_info = leaf->fs_info; 797 struct btrfs_inode_item *iitem; 798 u64 super_gen = btrfs_super_generation(fs_info->super_copy); 799 u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777); 800 u32 mode; 801 802 if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID || 803 key->objectid > BTRFS_LAST_FREE_OBJECTID) && 804 key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID && 805 key->objectid != BTRFS_FREE_INO_OBJECTID) { 806 generic_err(leaf, slot, 807 "invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu", 808 key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID, 809 BTRFS_FIRST_FREE_OBJECTID, 810 BTRFS_LAST_FREE_OBJECTID, 811 BTRFS_FREE_INO_OBJECTID); 812 return -EUCLEAN; 813 } 814 if (key->offset != 0) { 815 inode_item_err(fs_info, leaf, slot, 816 "invalid key offset: has %llu expect 0", 817 key->offset); 818 return -EUCLEAN; 819 } 820 iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item); 821 822 /* Here we use super block generation + 1 to handle log tree */ 823 if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) { 824 inode_item_err(fs_info, leaf, slot, 825 "invalid inode generation: has %llu expect (0, %llu]", 826 btrfs_inode_generation(leaf, iitem), 827 super_gen + 1); 828 return -EUCLEAN; 829 } 830 /* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */ 831 if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) { 832 inode_item_err(fs_info, leaf, slot, 833 "invalid inode generation: has %llu expect [0, %llu]", 834 btrfs_inode_transid(leaf, iitem), super_gen + 1); 835 return -EUCLEAN; 836 } 837 838 /* 839 * For size and nbytes it's better not to be too strict, as for dir 840 * item its size/nbytes can easily get wrong, but doesn't affect 841 * anything in the fs. So here we skip the check. 842 */ 843 mode = btrfs_inode_mode(leaf, iitem); 844 if (mode & ~valid_mask) { 845 inode_item_err(fs_info, leaf, slot, 846 "unknown mode bit detected: 0x%x", 847 mode & ~valid_mask); 848 return -EUCLEAN; 849 } 850 851 /* 852 * S_IFMT is not bit mapped so we can't completely rely on 853 * is_power_of_2/has_single_bit_set, but it can save us from checking 854 * FIFO/CHR/DIR/REG. Only needs to check BLK, LNK and SOCKS 855 */ 856 if (!has_single_bit_set(mode & S_IFMT)) { 857 if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) { 858 inode_item_err(fs_info, leaf, slot, 859 "invalid mode: has 0%o expect valid S_IF* bit(s)", 860 mode & S_IFMT); 861 return -EUCLEAN; 862 } 863 } 864 if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) { 865 inode_item_err(fs_info, leaf, slot, 866 "invalid nlink: has %u expect no more than 1 for dir", 867 btrfs_inode_nlink(leaf, iitem)); 868 return -EUCLEAN; 869 } 870 if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) { 871 inode_item_err(fs_info, leaf, slot, 872 "unknown flags detected: 0x%llx", 873 btrfs_inode_flags(leaf, iitem) & 874 ~BTRFS_INODE_FLAG_MASK); 875 return -EUCLEAN; 876 } 877 return 0; 878 } 879 880 static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key, 881 int slot) 882 { 883 struct btrfs_fs_info *fs_info = leaf->fs_info; 884 struct btrfs_root_item ri; 885 const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY | 886 BTRFS_ROOT_SUBVOL_DEAD; 887 888 /* No such tree id */ 889 if (key->objectid == 0) { 890 generic_err(leaf, slot, "invalid root id 0"); 891 return -EUCLEAN; 892 } 893 894 /* 895 * Some older kernel may create ROOT_ITEM with non-zero offset, so here 896 * we only check offset for reloc tree whose key->offset must be a 897 * valid tree. 898 */ 899 if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) { 900 generic_err(leaf, slot, "invalid root id 0 for reloc tree"); 901 return -EUCLEAN; 902 } 903 904 if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) { 905 generic_err(leaf, slot, 906 "invalid root item size, have %u expect %zu", 907 btrfs_item_size_nr(leaf, slot), sizeof(ri)); 908 } 909 910 read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot), 911 sizeof(ri)); 912 913 /* Generation related */ 914 if (btrfs_root_generation(&ri) > 915 btrfs_super_generation(fs_info->super_copy) + 1) { 916 generic_err(leaf, slot, 917 "invalid root generation, have %llu expect (0, %llu]", 918 btrfs_root_generation(&ri), 919 btrfs_super_generation(fs_info->super_copy) + 1); 920 return -EUCLEAN; 921 } 922 if (btrfs_root_generation_v2(&ri) > 923 btrfs_super_generation(fs_info->super_copy) + 1) { 924 generic_err(leaf, slot, 925 "invalid root v2 generation, have %llu expect (0, %llu]", 926 btrfs_root_generation_v2(&ri), 927 btrfs_super_generation(fs_info->super_copy) + 1); 928 return -EUCLEAN; 929 } 930 if (btrfs_root_last_snapshot(&ri) > 931 btrfs_super_generation(fs_info->super_copy) + 1) { 932 generic_err(leaf, slot, 933 "invalid root last_snapshot, have %llu expect (0, %llu]", 934 btrfs_root_last_snapshot(&ri), 935 btrfs_super_generation(fs_info->super_copy) + 1); 936 return -EUCLEAN; 937 } 938 939 /* Alignment and level check */ 940 if (!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize)) { 941 generic_err(leaf, slot, 942 "invalid root bytenr, have %llu expect to be aligned to %u", 943 btrfs_root_bytenr(&ri), fs_info->sectorsize); 944 return -EUCLEAN; 945 } 946 if (btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL) { 947 generic_err(leaf, slot, 948 "invalid root level, have %u expect [0, %u]", 949 btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1); 950 return -EUCLEAN; 951 } 952 if (ri.drop_level >= BTRFS_MAX_LEVEL) { 953 generic_err(leaf, slot, 954 "invalid root level, have %u expect [0, %u]", 955 ri.drop_level, BTRFS_MAX_LEVEL - 1); 956 return -EUCLEAN; 957 } 958 959 /* Flags check */ 960 if (btrfs_root_flags(&ri) & ~valid_root_flags) { 961 generic_err(leaf, slot, 962 "invalid root flags, have 0x%llx expect mask 0x%llx", 963 btrfs_root_flags(&ri), valid_root_flags); 964 return -EUCLEAN; 965 } 966 return 0; 967 } 968 969 __printf(3,4) 970 __cold 971 static void extent_err(const struct extent_buffer *eb, int slot, 972 const char *fmt, ...) 973 { 974 struct btrfs_key key; 975 struct va_format vaf; 976 va_list args; 977 u64 bytenr; 978 u64 len; 979 980 btrfs_item_key_to_cpu(eb, &key, slot); 981 bytenr = key.objectid; 982 if (key.type == BTRFS_METADATA_ITEM_KEY || 983 key.type == BTRFS_TREE_BLOCK_REF_KEY || 984 key.type == BTRFS_SHARED_BLOCK_REF_KEY) 985 len = eb->fs_info->nodesize; 986 else 987 len = key.offset; 988 va_start(args, fmt); 989 990 vaf.fmt = fmt; 991 vaf.va = &args; 992 993 btrfs_crit(eb->fs_info, 994 "corrupt %s: block=%llu slot=%d extent bytenr=%llu len=%llu %pV", 995 btrfs_header_level(eb) == 0 ? "leaf" : "node", 996 eb->start, slot, bytenr, len, &vaf); 997 va_end(args); 998 } 999 1000 static int check_extent_item(struct extent_buffer *leaf, 1001 struct btrfs_key *key, int slot) 1002 { 1003 struct btrfs_fs_info *fs_info = leaf->fs_info; 1004 struct btrfs_extent_item *ei; 1005 bool is_tree_block = false; 1006 unsigned long ptr; /* Current pointer inside inline refs */ 1007 unsigned long end; /* Extent item end */ 1008 const u32 item_size = btrfs_item_size_nr(leaf, slot); 1009 u64 flags; 1010 u64 generation; 1011 u64 total_refs; /* Total refs in btrfs_extent_item */ 1012 u64 inline_refs = 0; /* found total inline refs */ 1013 1014 if (key->type == BTRFS_METADATA_ITEM_KEY && 1015 !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) { 1016 generic_err(leaf, slot, 1017 "invalid key type, METADATA_ITEM type invalid when SKINNY_METADATA feature disabled"); 1018 return -EUCLEAN; 1019 } 1020 /* key->objectid is the bytenr for both key types */ 1021 if (!IS_ALIGNED(key->objectid, fs_info->sectorsize)) { 1022 generic_err(leaf, slot, 1023 "invalid key objectid, have %llu expect to be aligned to %u", 1024 key->objectid, fs_info->sectorsize); 1025 return -EUCLEAN; 1026 } 1027 1028 /* key->offset is tree level for METADATA_ITEM_KEY */ 1029 if (key->type == BTRFS_METADATA_ITEM_KEY && 1030 key->offset >= BTRFS_MAX_LEVEL) { 1031 extent_err(leaf, slot, 1032 "invalid tree level, have %llu expect [0, %u]", 1033 key->offset, BTRFS_MAX_LEVEL - 1); 1034 return -EUCLEAN; 1035 } 1036 1037 /* 1038 * EXTENT/METADATA_ITEM consists of: 1039 * 1) One btrfs_extent_item 1040 * Records the total refs, type and generation of the extent. 1041 * 1042 * 2) One btrfs_tree_block_info (for EXTENT_ITEM and tree backref only) 1043 * Records the first key and level of the tree block. 1044 * 1045 * 2) Zero or more btrfs_extent_inline_ref(s) 1046 * Each inline ref has one btrfs_extent_inline_ref shows: 1047 * 2.1) The ref type, one of the 4 1048 * TREE_BLOCK_REF Tree block only 1049 * SHARED_BLOCK_REF Tree block only 1050 * EXTENT_DATA_REF Data only 1051 * SHARED_DATA_REF Data only 1052 * 2.2) Ref type specific data 1053 * Either using btrfs_extent_inline_ref::offset, or specific 1054 * data structure. 1055 */ 1056 if (item_size < sizeof(*ei)) { 1057 extent_err(leaf, slot, 1058 "invalid item size, have %u expect [%zu, %u)", 1059 item_size, sizeof(*ei), 1060 BTRFS_LEAF_DATA_SIZE(fs_info)); 1061 return -EUCLEAN; 1062 } 1063 end = item_size + btrfs_item_ptr_offset(leaf, slot); 1064 1065 /* Checks against extent_item */ 1066 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); 1067 flags = btrfs_extent_flags(leaf, ei); 1068 total_refs = btrfs_extent_refs(leaf, ei); 1069 generation = btrfs_extent_generation(leaf, ei); 1070 if (generation > btrfs_super_generation(fs_info->super_copy) + 1) { 1071 extent_err(leaf, slot, 1072 "invalid generation, have %llu expect (0, %llu]", 1073 generation, 1074 btrfs_super_generation(fs_info->super_copy) + 1); 1075 return -EUCLEAN; 1076 } 1077 if (!has_single_bit_set(flags & (BTRFS_EXTENT_FLAG_DATA | 1078 BTRFS_EXTENT_FLAG_TREE_BLOCK))) { 1079 extent_err(leaf, slot, 1080 "invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx", 1081 flags, BTRFS_EXTENT_FLAG_DATA | 1082 BTRFS_EXTENT_FLAG_TREE_BLOCK); 1083 return -EUCLEAN; 1084 } 1085 is_tree_block = !!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK); 1086 if (is_tree_block) { 1087 if (key->type == BTRFS_EXTENT_ITEM_KEY && 1088 key->offset != fs_info->nodesize) { 1089 extent_err(leaf, slot, 1090 "invalid extent length, have %llu expect %u", 1091 key->offset, fs_info->nodesize); 1092 return -EUCLEAN; 1093 } 1094 } else { 1095 if (key->type != BTRFS_EXTENT_ITEM_KEY) { 1096 extent_err(leaf, slot, 1097 "invalid key type, have %u expect %u for data backref", 1098 key->type, BTRFS_EXTENT_ITEM_KEY); 1099 return -EUCLEAN; 1100 } 1101 if (!IS_ALIGNED(key->offset, fs_info->sectorsize)) { 1102 extent_err(leaf, slot, 1103 "invalid extent length, have %llu expect aligned to %u", 1104 key->offset, fs_info->sectorsize); 1105 return -EUCLEAN; 1106 } 1107 } 1108 ptr = (unsigned long)(struct btrfs_extent_item *)(ei + 1); 1109 1110 /* Check the special case of btrfs_tree_block_info */ 1111 if (is_tree_block && key->type != BTRFS_METADATA_ITEM_KEY) { 1112 struct btrfs_tree_block_info *info; 1113 1114 info = (struct btrfs_tree_block_info *)ptr; 1115 if (btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL) { 1116 extent_err(leaf, slot, 1117 "invalid tree block info level, have %u expect [0, %u]", 1118 btrfs_tree_block_level(leaf, info), 1119 BTRFS_MAX_LEVEL - 1); 1120 return -EUCLEAN; 1121 } 1122 ptr = (unsigned long)(struct btrfs_tree_block_info *)(info + 1); 1123 } 1124 1125 /* Check inline refs */ 1126 while (ptr < end) { 1127 struct btrfs_extent_inline_ref *iref; 1128 struct btrfs_extent_data_ref *dref; 1129 struct btrfs_shared_data_ref *sref; 1130 u64 dref_offset; 1131 u64 inline_offset; 1132 u8 inline_type; 1133 1134 if (ptr + sizeof(*iref) > end) { 1135 extent_err(leaf, slot, 1136 "inline ref item overflows extent item, ptr %lu iref size %zu end %lu", 1137 ptr, sizeof(*iref), end); 1138 return -EUCLEAN; 1139 } 1140 iref = (struct btrfs_extent_inline_ref *)ptr; 1141 inline_type = btrfs_extent_inline_ref_type(leaf, iref); 1142 inline_offset = btrfs_extent_inline_ref_offset(leaf, iref); 1143 if (ptr + btrfs_extent_inline_ref_size(inline_type) > end) { 1144 extent_err(leaf, slot, 1145 "inline ref item overflows extent item, ptr %lu iref size %u end %lu", 1146 ptr, inline_type, end); 1147 return -EUCLEAN; 1148 } 1149 1150 switch (inline_type) { 1151 /* inline_offset is subvolid of the owner, no need to check */ 1152 case BTRFS_TREE_BLOCK_REF_KEY: 1153 inline_refs++; 1154 break; 1155 /* Contains parent bytenr */ 1156 case BTRFS_SHARED_BLOCK_REF_KEY: 1157 if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) { 1158 extent_err(leaf, slot, 1159 "invalid tree parent bytenr, have %llu expect aligned to %u", 1160 inline_offset, fs_info->sectorsize); 1161 return -EUCLEAN; 1162 } 1163 inline_refs++; 1164 break; 1165 /* 1166 * Contains owner subvolid, owner key objectid, adjusted offset. 1167 * The only obvious corruption can happen in that offset. 1168 */ 1169 case BTRFS_EXTENT_DATA_REF_KEY: 1170 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 1171 dref_offset = btrfs_extent_data_ref_offset(leaf, dref); 1172 if (!IS_ALIGNED(dref_offset, fs_info->sectorsize)) { 1173 extent_err(leaf, slot, 1174 "invalid data ref offset, have %llu expect aligned to %u", 1175 dref_offset, fs_info->sectorsize); 1176 return -EUCLEAN; 1177 } 1178 inline_refs += btrfs_extent_data_ref_count(leaf, dref); 1179 break; 1180 /* Contains parent bytenr and ref count */ 1181 case BTRFS_SHARED_DATA_REF_KEY: 1182 sref = (struct btrfs_shared_data_ref *)(iref + 1); 1183 if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) { 1184 extent_err(leaf, slot, 1185 "invalid data parent bytenr, have %llu expect aligned to %u", 1186 inline_offset, fs_info->sectorsize); 1187 return -EUCLEAN; 1188 } 1189 inline_refs += btrfs_shared_data_ref_count(leaf, sref); 1190 break; 1191 default: 1192 extent_err(leaf, slot, "unknown inline ref type: %u", 1193 inline_type); 1194 return -EUCLEAN; 1195 } 1196 ptr += btrfs_extent_inline_ref_size(inline_type); 1197 } 1198 /* No padding is allowed */ 1199 if (ptr != end) { 1200 extent_err(leaf, slot, 1201 "invalid extent item size, padding bytes found"); 1202 return -EUCLEAN; 1203 } 1204 1205 /* Finally, check the inline refs against total refs */ 1206 if (inline_refs > total_refs) { 1207 extent_err(leaf, slot, 1208 "invalid extent refs, have %llu expect >= inline %llu", 1209 total_refs, inline_refs); 1210 return -EUCLEAN; 1211 } 1212 return 0; 1213 } 1214 1215 static int check_simple_keyed_refs(struct extent_buffer *leaf, 1216 struct btrfs_key *key, int slot) 1217 { 1218 u32 expect_item_size = 0; 1219 1220 if (key->type == BTRFS_SHARED_DATA_REF_KEY) 1221 expect_item_size = sizeof(struct btrfs_shared_data_ref); 1222 1223 if (btrfs_item_size_nr(leaf, slot) != expect_item_size) { 1224 generic_err(leaf, slot, 1225 "invalid item size, have %u expect %u for key type %u", 1226 btrfs_item_size_nr(leaf, slot), 1227 expect_item_size, key->type); 1228 return -EUCLEAN; 1229 } 1230 if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) { 1231 generic_err(leaf, slot, 1232 "invalid key objectid for shared block ref, have %llu expect aligned to %u", 1233 key->objectid, leaf->fs_info->sectorsize); 1234 return -EUCLEAN; 1235 } 1236 if (key->type != BTRFS_TREE_BLOCK_REF_KEY && 1237 !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize)) { 1238 extent_err(leaf, slot, 1239 "invalid tree parent bytenr, have %llu expect aligned to %u", 1240 key->offset, leaf->fs_info->sectorsize); 1241 return -EUCLEAN; 1242 } 1243 return 0; 1244 } 1245 1246 static int check_extent_data_ref(struct extent_buffer *leaf, 1247 struct btrfs_key *key, int slot) 1248 { 1249 struct btrfs_extent_data_ref *dref; 1250 unsigned long ptr = btrfs_item_ptr_offset(leaf, slot); 1251 const unsigned long end = ptr + btrfs_item_size_nr(leaf, slot); 1252 1253 if (btrfs_item_size_nr(leaf, slot) % sizeof(*dref) != 0) { 1254 generic_err(leaf, slot, 1255 "invalid item size, have %u expect aligned to %zu for key type %u", 1256 btrfs_item_size_nr(leaf, slot), 1257 sizeof(*dref), key->type); 1258 } 1259 if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) { 1260 generic_err(leaf, slot, 1261 "invalid key objectid for shared block ref, have %llu expect aligned to %u", 1262 key->objectid, leaf->fs_info->sectorsize); 1263 return -EUCLEAN; 1264 } 1265 for (; ptr < end; ptr += sizeof(*dref)) { 1266 u64 root_objectid; 1267 u64 owner; 1268 u64 offset; 1269 u64 hash; 1270 1271 dref = (struct btrfs_extent_data_ref *)ptr; 1272 root_objectid = btrfs_extent_data_ref_root(leaf, dref); 1273 owner = btrfs_extent_data_ref_objectid(leaf, dref); 1274 offset = btrfs_extent_data_ref_offset(leaf, dref); 1275 hash = hash_extent_data_ref(root_objectid, owner, offset); 1276 if (hash != key->offset) { 1277 extent_err(leaf, slot, 1278 "invalid extent data ref hash, item has 0x%016llx key has 0x%016llx", 1279 hash, key->offset); 1280 return -EUCLEAN; 1281 } 1282 if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) { 1283 extent_err(leaf, slot, 1284 "invalid extent data backref offset, have %llu expect aligned to %u", 1285 offset, leaf->fs_info->sectorsize); 1286 } 1287 } 1288 return 0; 1289 } 1290 1291 #define inode_ref_err(fs_info, eb, slot, fmt, args...) \ 1292 inode_item_err(fs_info, eb, slot, fmt, ##args) 1293 static int check_inode_ref(struct extent_buffer *leaf, 1294 struct btrfs_key *key, struct btrfs_key *prev_key, 1295 int slot) 1296 { 1297 struct btrfs_inode_ref *iref; 1298 unsigned long ptr; 1299 unsigned long end; 1300 1301 if (!check_prev_ino(leaf, key, slot, prev_key)) 1302 return -EUCLEAN; 1303 /* namelen can't be 0, so item_size == sizeof() is also invalid */ 1304 if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) { 1305 inode_ref_err(fs_info, leaf, slot, 1306 "invalid item size, have %u expect (%zu, %u)", 1307 btrfs_item_size_nr(leaf, slot), 1308 sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info)); 1309 return -EUCLEAN; 1310 } 1311 1312 ptr = btrfs_item_ptr_offset(leaf, slot); 1313 end = ptr + btrfs_item_size_nr(leaf, slot); 1314 while (ptr < end) { 1315 u16 namelen; 1316 1317 if (ptr + sizeof(iref) > end) { 1318 inode_ref_err(fs_info, leaf, slot, 1319 "inode ref overflow, ptr %lu end %lu inode_ref_size %zu", 1320 ptr, end, sizeof(iref)); 1321 return -EUCLEAN; 1322 } 1323 1324 iref = (struct btrfs_inode_ref *)ptr; 1325 namelen = btrfs_inode_ref_name_len(leaf, iref); 1326 if (ptr + sizeof(*iref) + namelen > end) { 1327 inode_ref_err(fs_info, leaf, slot, 1328 "inode ref overflow, ptr %lu end %lu namelen %u", 1329 ptr, end, namelen); 1330 return -EUCLEAN; 1331 } 1332 1333 /* 1334 * NOTE: In theory we should record all found index numbers 1335 * to find any duplicated indexes, but that will be too time 1336 * consuming for inodes with too many hard links. 1337 */ 1338 ptr += sizeof(*iref) + namelen; 1339 } 1340 return 0; 1341 } 1342 1343 /* 1344 * Common point to switch the item-specific validation. 1345 */ 1346 static int check_leaf_item(struct extent_buffer *leaf, 1347 struct btrfs_key *key, int slot, 1348 struct btrfs_key *prev_key) 1349 { 1350 int ret = 0; 1351 struct btrfs_chunk *chunk; 1352 1353 switch (key->type) { 1354 case BTRFS_EXTENT_DATA_KEY: 1355 ret = check_extent_data_item(leaf, key, slot, prev_key); 1356 break; 1357 case BTRFS_EXTENT_CSUM_KEY: 1358 ret = check_csum_item(leaf, key, slot); 1359 break; 1360 case BTRFS_DIR_ITEM_KEY: 1361 case BTRFS_DIR_INDEX_KEY: 1362 case BTRFS_XATTR_ITEM_KEY: 1363 ret = check_dir_item(leaf, key, prev_key, slot); 1364 break; 1365 case BTRFS_INODE_REF_KEY: 1366 ret = check_inode_ref(leaf, key, prev_key, slot); 1367 break; 1368 case BTRFS_BLOCK_GROUP_ITEM_KEY: 1369 ret = check_block_group_item(leaf, key, slot); 1370 break; 1371 case BTRFS_CHUNK_ITEM_KEY: 1372 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 1373 ret = btrfs_check_chunk_valid(leaf, chunk, key->offset); 1374 break; 1375 case BTRFS_DEV_ITEM_KEY: 1376 ret = check_dev_item(leaf, key, slot); 1377 break; 1378 case BTRFS_INODE_ITEM_KEY: 1379 ret = check_inode_item(leaf, key, slot); 1380 break; 1381 case BTRFS_ROOT_ITEM_KEY: 1382 ret = check_root_item(leaf, key, slot); 1383 break; 1384 case BTRFS_EXTENT_ITEM_KEY: 1385 case BTRFS_METADATA_ITEM_KEY: 1386 ret = check_extent_item(leaf, key, slot); 1387 break; 1388 case BTRFS_TREE_BLOCK_REF_KEY: 1389 case BTRFS_SHARED_DATA_REF_KEY: 1390 case BTRFS_SHARED_BLOCK_REF_KEY: 1391 ret = check_simple_keyed_refs(leaf, key, slot); 1392 break; 1393 case BTRFS_EXTENT_DATA_REF_KEY: 1394 ret = check_extent_data_ref(leaf, key, slot); 1395 break; 1396 } 1397 return ret; 1398 } 1399 1400 static int check_leaf(struct extent_buffer *leaf, bool check_item_data) 1401 { 1402 struct btrfs_fs_info *fs_info = leaf->fs_info; 1403 /* No valid key type is 0, so all key should be larger than this key */ 1404 struct btrfs_key prev_key = {0, 0, 0}; 1405 struct btrfs_key key; 1406 u32 nritems = btrfs_header_nritems(leaf); 1407 int slot; 1408 1409 if (btrfs_header_level(leaf) != 0) { 1410 generic_err(leaf, 0, 1411 "invalid level for leaf, have %d expect 0", 1412 btrfs_header_level(leaf)); 1413 return -EUCLEAN; 1414 } 1415 1416 /* 1417 * Extent buffers from a relocation tree have a owner field that 1418 * corresponds to the subvolume tree they are based on. So just from an 1419 * extent buffer alone we can not find out what is the id of the 1420 * corresponding subvolume tree, so we can not figure out if the extent 1421 * buffer corresponds to the root of the relocation tree or not. So 1422 * skip this check for relocation trees. 1423 */ 1424 if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) { 1425 u64 owner = btrfs_header_owner(leaf); 1426 1427 /* These trees must never be empty */ 1428 if (owner == BTRFS_ROOT_TREE_OBJECTID || 1429 owner == BTRFS_CHUNK_TREE_OBJECTID || 1430 owner == BTRFS_EXTENT_TREE_OBJECTID || 1431 owner == BTRFS_DEV_TREE_OBJECTID || 1432 owner == BTRFS_FS_TREE_OBJECTID || 1433 owner == BTRFS_DATA_RELOC_TREE_OBJECTID) { 1434 generic_err(leaf, 0, 1435 "invalid root, root %llu must never be empty", 1436 owner); 1437 return -EUCLEAN; 1438 } 1439 /* Unknown tree */ 1440 if (owner == 0) { 1441 generic_err(leaf, 0, 1442 "invalid owner, root 0 is not defined"); 1443 return -EUCLEAN; 1444 } 1445 return 0; 1446 } 1447 1448 if (nritems == 0) 1449 return 0; 1450 1451 /* 1452 * Check the following things to make sure this is a good leaf, and 1453 * leaf users won't need to bother with similar sanity checks: 1454 * 1455 * 1) key ordering 1456 * 2) item offset and size 1457 * No overlap, no hole, all inside the leaf. 1458 * 3) item content 1459 * If possible, do comprehensive sanity check. 1460 * NOTE: All checks must only rely on the item data itself. 1461 */ 1462 for (slot = 0; slot < nritems; slot++) { 1463 u32 item_end_expected; 1464 int ret; 1465 1466 btrfs_item_key_to_cpu(leaf, &key, slot); 1467 1468 /* Make sure the keys are in the right order */ 1469 if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) { 1470 generic_err(leaf, slot, 1471 "bad key order, prev (%llu %u %llu) current (%llu %u %llu)", 1472 prev_key.objectid, prev_key.type, 1473 prev_key.offset, key.objectid, key.type, 1474 key.offset); 1475 return -EUCLEAN; 1476 } 1477 1478 /* 1479 * Make sure the offset and ends are right, remember that the 1480 * item data starts at the end of the leaf and grows towards the 1481 * front. 1482 */ 1483 if (slot == 0) 1484 item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info); 1485 else 1486 item_end_expected = btrfs_item_offset_nr(leaf, 1487 slot - 1); 1488 if (btrfs_item_end_nr(leaf, slot) != item_end_expected) { 1489 generic_err(leaf, slot, 1490 "unexpected item end, have %u expect %u", 1491 btrfs_item_end_nr(leaf, slot), 1492 item_end_expected); 1493 return -EUCLEAN; 1494 } 1495 1496 /* 1497 * Check to make sure that we don't point outside of the leaf, 1498 * just in case all the items are consistent to each other, but 1499 * all point outside of the leaf. 1500 */ 1501 if (btrfs_item_end_nr(leaf, slot) > 1502 BTRFS_LEAF_DATA_SIZE(fs_info)) { 1503 generic_err(leaf, slot, 1504 "slot end outside of leaf, have %u expect range [0, %u]", 1505 btrfs_item_end_nr(leaf, slot), 1506 BTRFS_LEAF_DATA_SIZE(fs_info)); 1507 return -EUCLEAN; 1508 } 1509 1510 /* Also check if the item pointer overlaps with btrfs item. */ 1511 if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) > 1512 btrfs_item_ptr_offset(leaf, slot)) { 1513 generic_err(leaf, slot, 1514 "slot overlaps with its data, item end %lu data start %lu", 1515 btrfs_item_nr_offset(slot) + 1516 sizeof(struct btrfs_item), 1517 btrfs_item_ptr_offset(leaf, slot)); 1518 return -EUCLEAN; 1519 } 1520 1521 if (check_item_data) { 1522 /* 1523 * Check if the item size and content meet other 1524 * criteria 1525 */ 1526 ret = check_leaf_item(leaf, &key, slot, &prev_key); 1527 if (ret < 0) 1528 return ret; 1529 } 1530 1531 prev_key.objectid = key.objectid; 1532 prev_key.type = key.type; 1533 prev_key.offset = key.offset; 1534 } 1535 1536 return 0; 1537 } 1538 1539 int btrfs_check_leaf_full(struct extent_buffer *leaf) 1540 { 1541 return check_leaf(leaf, true); 1542 } 1543 ALLOW_ERROR_INJECTION(btrfs_check_leaf_full, ERRNO); 1544 1545 int btrfs_check_leaf_relaxed(struct extent_buffer *leaf) 1546 { 1547 return check_leaf(leaf, false); 1548 } 1549 1550 int btrfs_check_node(struct extent_buffer *node) 1551 { 1552 struct btrfs_fs_info *fs_info = node->fs_info; 1553 unsigned long nr = btrfs_header_nritems(node); 1554 struct btrfs_key key, next_key; 1555 int slot; 1556 int level = btrfs_header_level(node); 1557 u64 bytenr; 1558 int ret = 0; 1559 1560 if (level <= 0 || level >= BTRFS_MAX_LEVEL) { 1561 generic_err(node, 0, 1562 "invalid level for node, have %d expect [1, %d]", 1563 level, BTRFS_MAX_LEVEL - 1); 1564 return -EUCLEAN; 1565 } 1566 if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) { 1567 btrfs_crit(fs_info, 1568 "corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]", 1569 btrfs_header_owner(node), node->start, 1570 nr == 0 ? "small" : "large", nr, 1571 BTRFS_NODEPTRS_PER_BLOCK(fs_info)); 1572 return -EUCLEAN; 1573 } 1574 1575 for (slot = 0; slot < nr - 1; slot++) { 1576 bytenr = btrfs_node_blockptr(node, slot); 1577 btrfs_node_key_to_cpu(node, &key, slot); 1578 btrfs_node_key_to_cpu(node, &next_key, slot + 1); 1579 1580 if (!bytenr) { 1581 generic_err(node, slot, 1582 "invalid NULL node pointer"); 1583 ret = -EUCLEAN; 1584 goto out; 1585 } 1586 if (!IS_ALIGNED(bytenr, fs_info->sectorsize)) { 1587 generic_err(node, slot, 1588 "unaligned pointer, have %llu should be aligned to %u", 1589 bytenr, fs_info->sectorsize); 1590 ret = -EUCLEAN; 1591 goto out; 1592 } 1593 1594 if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) { 1595 generic_err(node, slot, 1596 "bad key order, current (%llu %u %llu) next (%llu %u %llu)", 1597 key.objectid, key.type, key.offset, 1598 next_key.objectid, next_key.type, 1599 next_key.offset); 1600 ret = -EUCLEAN; 1601 goto out; 1602 } 1603 } 1604 out: 1605 return ret; 1606 } 1607 ALLOW_ERROR_INJECTION(btrfs_check_node, ERRNO); 1608