// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Qu Wenruo 2017.  All rights reserved.
 */

/*
 * The module is used to catch unexpected/corrupted tree block data.
 * Such behavior can be caused either by a fuzzed image or bugs.
 *
 * The objective is to do leaf/node validation checks when tree block is read
 * from disk, and check *every* possible member, so other code won't
 * need to check them again.
 *
 * Due to the potential and unwanted damage, every checker needs to be
 * carefully reviewed, otherwise it could prevent mount of valid images.
 */

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "tree-checker.h"
#include "disk-io.h"
#include "compression.h"
#include "volumes.h"
#include "misc.h"

/*
 * Error message should follow the following format:
 * corrupt <type>: <identifier>, <reason>[, <bad_value>]
 *
 * @type:	leaf or node
 * @identifier:	the necessary info to locate the leaf/node.
 *		It's recommended to decode key.objectid/offset if it's
 *		meaningful.
 * @reason:	describe the error
 * @bad_value:	optional, it's recommended to output bad value and its
 *		expected value (range).
 *
 * Since comma is used to separate the components, only space is allowed
 * inside each component.
 */

/*
 * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
 * Allows callers to customize the output.
 */
__printf(3, 4)
__cold
static void generic_err(const struct extent_buffer *eb, int slot,
			const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
		"corrupt %s: root=%llu block=%llu slot=%d, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, &vaf);
	va_end(args);
}

/*
 * Customized reporter for extent data item, since its key objectid and
 * offset has its own meaning (inode number and file offset).
 */
__printf(3, 4)
__cold
static void file_extent_err(const struct extent_buffer *eb, int slot,
			    const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, key.offset, &vaf);
	va_end(args);
}

/*
 * Return 0 if the btrfs_file_extent_##name is aligned to @alignment
 * Else return 1
 */
#define CHECK_FE_ALIGNED(leaf, slot, fi, name, alignment)		      \
({									      \
	if (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))) \
		file_extent_err((leaf), (slot),				      \
	"invalid %s for file extent, have %llu, should be aligned to %u",     \
			(#name), btrfs_file_extent_##name((leaf), (fi)),      \
			(alignment));					      \
	(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment)));   \
})

/*
 * Return the exclusive end offset of the file extent at @key.
 *
 * Inline extents have no num_bytes on disk, so their length is taken from
 * ram_bytes and rounded up to the sectorsize boundary.
 */
static u64 file_extent_end(struct extent_buffer *leaf,
			   struct btrfs_key *key,
			   struct btrfs_file_extent_item *extent)
{
	u64 end;
	u64 len;

	if (btrfs_file_extent_type(leaf, extent) == BTRFS_FILE_EXTENT_INLINE) {
		len = btrfs_file_extent_ram_bytes(leaf, extent);
		end = ALIGN(key->offset + len, leaf->fs_info->sectorsize);
	} else {
		len = btrfs_file_extent_num_bytes(leaf, extent);
		end = key->offset + len;
	}
	return end;
}

/*
 * Customized report for dir_item, the only new important information is
 * key->objectid, which represents inode number
 */
__printf(3, 4)
__cold
static void dir_item_err(const struct extent_buffer *eb, int slot,
			 const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
		"corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, &vaf);
	va_end(args);
}

/*
 * This functions checks prev_key->objectid, to ensure current key and prev_key
 * share the same objectid as inode number.
 *
 * This is to detect missing INODE_ITEM in subvolume trees.
 *
 * Return true if everything is OK or we don't need to check.
 * Return false if anything is wrong.
 */
static bool check_prev_ino(struct extent_buffer *leaf,
			   struct btrfs_key *key, int slot,
			   struct btrfs_key *prev_key)
{
	/* No prev key, skip check */
	if (slot == 0)
		return true;

	/* Only these key->types needs to be checked */
	ASSERT(key->type == BTRFS_XATTR_ITEM_KEY ||
	       key->type == BTRFS_INODE_REF_KEY ||
	       key->type == BTRFS_DIR_INDEX_KEY ||
	       key->type == BTRFS_DIR_ITEM_KEY ||
	       key->type == BTRFS_EXTENT_DATA_KEY);

	/*
	 * Only subvolume trees along with their reloc trees need this check.
	 * Things like log tree doesn't follow this ino requirement.
	 */
	if (!is_fstree(btrfs_header_owner(leaf)))
		return true;

	if (key->objectid == prev_key->objectid)
		return true;

	/* Error found */
	dir_item_err(leaf, slot,
		"invalid previous key objectid, have %llu expect %llu",
		prev_key->objectid, key->objectid);
	return false;
}

/*
 * Validate one EXTENT_DATA item: key alignment, extent type,
 * compression/encryption fields, inline vs regular item layout, extent end
 * overflow, and overlap against the previous file extent in the leaf.
 */
static int check_extent_data_item(struct extent_buffer *leaf,
				  struct btrfs_key *key, int slot,
				  struct btrfs_key *prev_key)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_file_extent_item *fi;
	u32 sectorsize = fs_info->sectorsize;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u64 extent_end;

	if (!IS_ALIGNED(key->offset, sectorsize)) {
		file_extent_err(leaf, slot,
"unaligned file_offset for file extent, have %llu should be aligned to %u",
			key->offset, sectorsize);
		return -EUCLEAN;
	}

	/*
	 * Previous key must have the same key->objectid (ino).
	 * It can be XATTR_ITEM, INODE_ITEM or just another EXTENT_DATA.
	 * But if objectids mismatch, it means we have a missing
	 * INODE_ITEM.
	 */
	if (!check_prev_ino(leaf, key, slot, prev_key))
		return -EUCLEAN;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
		file_extent_err(leaf, slot,
		"invalid type for file extent, have %u expect range [0, %u]",
			btrfs_file_extent_type(leaf, fi),
			BTRFS_FILE_EXTENT_TYPES);
		return -EUCLEAN;
	}

	/*
	 * Support for new compression/encryption must introduce incompat flag,
	 * and must be caught in open_ctree().
	 */
	if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
		file_extent_err(leaf, slot,
	"invalid compression for file extent, have %u expect range [0, %u]",
			btrfs_file_extent_compression(leaf, fi),
			BTRFS_COMPRESS_TYPES);
		return -EUCLEAN;
	}
	if (btrfs_file_extent_encryption(leaf, fi)) {
		file_extent_err(leaf, slot,
			"invalid encryption for file extent, have %u expect 0",
			btrfs_file_extent_encryption(leaf, fi));
		return -EUCLEAN;
	}
	if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
		/* Inline extent must have 0 as key offset */
		if (key->offset) {
			file_extent_err(leaf, slot,
		"invalid file_offset for inline file extent, have %llu expect 0",
				key->offset);
			return -EUCLEAN;
		}

		/* Compressed inline extent has no on-disk size, skip it */
		if (btrfs_file_extent_compression(leaf, fi) !=
		    BTRFS_COMPRESS_NONE)
			return 0;

		/* Uncompressed inline extent size must match item size */
		if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
		    btrfs_file_extent_ram_bytes(leaf, fi)) {
			file_extent_err(leaf, slot,
	"invalid ram_bytes for uncompressed inline extent, have %u expect %llu",
				item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START +
				btrfs_file_extent_ram_bytes(leaf, fi));
			return -EUCLEAN;
		}
		return 0;
	}

	/* Regular or preallocated extent has fixed item size */
	if (item_size != sizeof(*fi)) {
		file_extent_err(leaf, slot,
	"invalid item size for reg/prealloc file extent, have %u expect %zu",
			item_size, sizeof(*fi));
		return -EUCLEAN;
	}
	if (CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, sectorsize) ||
	    CHECK_FE_ALIGNED(leaf, slot, fi, disk_bytenr, sectorsize) ||
	    CHECK_FE_ALIGNED(leaf, slot, fi, disk_num_bytes, sectorsize) ||
	    CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) ||
	    CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize))
		return -EUCLEAN;

	/* Catch extent end overflow */
	if (check_add_overflow(btrfs_file_extent_num_bytes(leaf, fi),
			       key->offset, &extent_end)) {
		file_extent_err(leaf, slot,
	"extent end overflow, have file offset %llu extent num bytes %llu",
				key->offset,
				btrfs_file_extent_num_bytes(leaf, fi));
		return -EUCLEAN;
	}

	/*
	 * Check that no two consecutive file extent items, in the same leaf,
	 * present ranges that overlap each other.
	 */
	if (slot > 0 &&
	    prev_key->objectid == key->objectid &&
	    prev_key->type == BTRFS_EXTENT_DATA_KEY) {
		struct btrfs_file_extent_item *prev_fi;
		u64 prev_end;

		prev_fi = btrfs_item_ptr(leaf, slot - 1,
					 struct btrfs_file_extent_item);
		prev_end = file_extent_end(leaf, prev_key, prev_fi);
		if (prev_end > key->offset) {
			file_extent_err(leaf, slot - 1,
"file extent end range (%llu) goes beyond start offset (%llu) of the next file extent",
					prev_end, key->offset);
			return -EUCLEAN;
		}
	}

	return 0;
}

/*
 * Validate one EXTENT_CSUM item: objectid must be the csum objectid, the
 * offset must be sector aligned, and the item size must hold a whole number
 * of checksums.
 */
static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
			   int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	u32 sectorsize = fs_info->sectorsize;
	u32 csumsize = btrfs_super_csum_size(fs_info->super_copy);

	if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
		generic_err(leaf, slot,
		"invalid key objectid for csum item, have %llu expect %llu",
			key->objectid, BTRFS_EXTENT_CSUM_OBJECTID);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(key->offset, sectorsize)) {
		generic_err(leaf, slot,
	"unaligned key offset for csum item, have %llu should be aligned to %u",
			key->offset, sectorsize);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
		generic_err(leaf, slot,
	"unaligned item size for csum item, have %u should be aligned to %u",
			btrfs_item_size_nr(leaf, slot), csumsize);
		return -EUCLEAN;
	}
	return 0;
}

/*
 * Validate a DIR_ITEM/DIR_INDEX/XATTR_ITEM item, which may contain several
 * packed (header, name, data) entries.  Each entry is checked for boundary,
 * type, name/data length, and (for hashed keys) name hash consistency.
 */
static int check_dir_item(struct extent_buffer *leaf,
			  struct btrfs_key *key, struct btrfs_key *prev_key,
			  int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_dir_item *di;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u32 cur = 0;

	if (!check_prev_ino(leaf, key, slot, prev_key))
		return -EUCLEAN;
	di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
	while (cur < item_size) {
		u32 name_len;
		u32 data_len;
		u32 max_name_len;
		u32 total_size;
		u32 name_hash;
		u8 dir_type;

		/* header itself should not cross item boundary */
		if (cur + sizeof(*di) > item_size) {
			dir_item_err(leaf, slot,
		"dir item header crosses item boundary, have %zu boundary %u",
				cur + sizeof(*di), item_size);
			return -EUCLEAN;
		}

		/* dir type check */
		dir_type = btrfs_dir_type(leaf, di);
		if (dir_type >= BTRFS_FT_MAX) {
			dir_item_err(leaf, slot,
			"invalid dir item type, have %u expect [0, %u)",
				dir_type, BTRFS_FT_MAX);
			return -EUCLEAN;
		}

		if (key->type == BTRFS_XATTR_ITEM_KEY &&
		    dir_type != BTRFS_FT_XATTR) {
			dir_item_err(leaf, slot,
		"invalid dir item type for XATTR key, have %u expect %u",
				dir_type, BTRFS_FT_XATTR);
			return -EUCLEAN;
		}
		if (dir_type == BTRFS_FT_XATTR &&
		    key->type != BTRFS_XATTR_ITEM_KEY) {
			dir_item_err(leaf, slot,
			"xattr dir type found for non-XATTR key");
			return -EUCLEAN;
		}
		if (dir_type == BTRFS_FT_XATTR)
			max_name_len = XATTR_NAME_MAX;
		else
			max_name_len = BTRFS_NAME_LEN;

		/* Name/data length check */
		name_len = btrfs_dir_name_len(leaf, di);
		data_len = btrfs_dir_data_len(leaf, di);
		if (name_len > max_name_len) {
			dir_item_err(leaf, slot,
			"dir item name len too long, have %u max %u",
				name_len, max_name_len);
			return -EUCLEAN;
		}
		if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info)) {
			dir_item_err(leaf, slot,
			"dir item name and data len too long, have %u max %u",
				name_len + data_len,
				BTRFS_MAX_XATTR_SIZE(fs_info));
			return -EUCLEAN;
		}

		/* Only xattr entries carry a data payload */
		if (data_len && dir_type != BTRFS_FT_XATTR) {
			dir_item_err(leaf, slot,
			"dir item with invalid data len, have %u expect 0",
				data_len);
			return -EUCLEAN;
		}

		total_size = sizeof(*di) + name_len + data_len;

		/* header and name/data should not cross item boundary */
		if (cur + total_size > item_size) {
			dir_item_err(leaf, slot,
		"dir item data crosses item boundary, have %u boundary %u",
				cur + total_size, item_size);
			return -EUCLEAN;
		}

		/*
		 * Special check for XATTR/DIR_ITEM, as key->offset is name
		 * hash, should match its name
		 */
		if (key->type == BTRFS_DIR_ITEM_KEY ||
		    key->type == BTRFS_XATTR_ITEM_KEY) {
			char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];

			read_extent_buffer(leaf, namebuf,
					(unsigned long)(di + 1), name_len);
			name_hash = btrfs_name_hash(namebuf, name_len);
			if (key->offset != name_hash) {
				dir_item_err(leaf, slot,
		"name hash mismatch with key, have 0x%016x expect 0x%016llx",
					name_hash, key->offset);
				return -EUCLEAN;
			}
		}
		cur += total_size;
		di = (struct btrfs_dir_item *)((void *)di + total_size);
	}
	return 0;
}

/*
 * Customized reporter for block group items; key.objectid/offset are the
 * block group start and length.
 */
__printf(3, 4)
__cold
static void block_group_err(const struct extent_buffer *eb, int slot,
			    const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, key.offset, &vaf);
	va_end(args);
}

/*
 * Validate one BLOCK_GROUP_ITEM: non-zero length, exact item size, chunk
 * objectid, used <= length, and sane profile/type flag combinations.
 */
static int check_block_group_item(struct extent_buffer *leaf,
				  struct btrfs_key *key, int slot)
{
	struct btrfs_block_group_item bgi;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u64 flags;
	u64 type;

	/*
	 * Here we don't really care about alignment since extent allocator can
	 * handle it.  We care more about the size.
	 */
	if (key->offset == 0) {
		block_group_err(leaf, slot,
				"invalid block group size 0");
		return -EUCLEAN;
	}

	if (item_size != sizeof(bgi)) {
		block_group_err(leaf, slot,
			"invalid item size, have %u expect %zu",
				item_size, sizeof(bgi));
		return -EUCLEAN;
	}

	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(bgi));
	if (btrfs_block_group_chunk_objectid(&bgi) !=
	    BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
		block_group_err(leaf, slot,
		"invalid block group chunk objectid, have %llu expect %llu",
				btrfs_block_group_chunk_objectid(&bgi),
				BTRFS_FIRST_CHUNK_TREE_OBJECTID);
		return -EUCLEAN;
	}

	if (btrfs_block_group_used(&bgi) > key->offset) {
		block_group_err(leaf, slot,
			"invalid block group used, have %llu expect [0, %llu)",
				btrfs_block_group_used(&bgi), key->offset);
		return -EUCLEAN;
	}

	flags = btrfs_block_group_flags(&bgi);
	if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
		block_group_err(leaf, slot,
"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
			flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
			hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
		return -EUCLEAN;
	}

	type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	if (type != BTRFS_BLOCK_GROUP_DATA &&
	    type != BTRFS_BLOCK_GROUP_METADATA &&
	    type != BTRFS_BLOCK_GROUP_SYSTEM &&
	    type != (BTRFS_BLOCK_GROUP_METADATA |
		     BTRFS_BLOCK_GROUP_DATA)) {
		block_group_err(leaf, slot,
"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
			type, hweight64(type),
			BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
			BTRFS_BLOCK_GROUP_SYSTEM,
			BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
		return -EUCLEAN;
	}
	return 0;
}

/*
 * Customized reporter for chunk items.  Works both for leaf chunks and for
 * the superblock sys_chunk_array (detected via the eb start offset).
 */
__printf(4, 5)
__cold
static void chunk_err(const struct extent_buffer *leaf,
		      const struct btrfs_chunk *chunk, u64 logical,
		      const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = leaf->fs_info;
	bool is_sb;
	struct va_format vaf;
	va_list args;
	int i;
	int slot = -1;

	/* Only superblock eb is able to have such small offset */
	is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);

	if (!is_sb) {
		/*
		 * Get the slot number by iterating through all slots, this
		 * would provide better readability.
		 */
		for (i = 0; i < btrfs_header_nritems(leaf); i++) {
			if (btrfs_item_ptr_offset(leaf, i) ==
			    (unsigned long)chunk) {
				slot = i;
				break;
			}
		}
	}
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (is_sb)
		btrfs_crit(fs_info,
		"corrupt superblock syschunk array: chunk_start=%llu, %pV",
			   logical, &vaf);
	else
		btrfs_crit(fs_info,
	"corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV",
			   BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot,
			   logical, &vaf);
	va_end(args);
}

/*
 * The common chunk check which could also work on super block sys chunk array.
 *
 * Return -EUCLEAN if anything is corrupted.
 * Return 0 if everything is OK.
607 */ 608 int btrfs_check_chunk_valid(struct extent_buffer *leaf, 609 struct btrfs_chunk *chunk, u64 logical) 610 { 611 struct btrfs_fs_info *fs_info = leaf->fs_info; 612 u64 length; 613 u64 stripe_len; 614 u16 num_stripes; 615 u16 sub_stripes; 616 u64 type; 617 u64 features; 618 bool mixed = false; 619 620 length = btrfs_chunk_length(leaf, chunk); 621 stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 622 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 623 sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); 624 type = btrfs_chunk_type(leaf, chunk); 625 626 if (!num_stripes) { 627 chunk_err(leaf, chunk, logical, 628 "invalid chunk num_stripes, have %u", num_stripes); 629 return -EUCLEAN; 630 } 631 if (!IS_ALIGNED(logical, fs_info->sectorsize)) { 632 chunk_err(leaf, chunk, logical, 633 "invalid chunk logical, have %llu should aligned to %u", 634 logical, fs_info->sectorsize); 635 return -EUCLEAN; 636 } 637 if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) { 638 chunk_err(leaf, chunk, logical, 639 "invalid chunk sectorsize, have %u expect %u", 640 btrfs_chunk_sector_size(leaf, chunk), 641 fs_info->sectorsize); 642 return -EUCLEAN; 643 } 644 if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) { 645 chunk_err(leaf, chunk, logical, 646 "invalid chunk length, have %llu", length); 647 return -EUCLEAN; 648 } 649 if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) { 650 chunk_err(leaf, chunk, logical, 651 "invalid chunk stripe length: %llu", 652 stripe_len); 653 return -EUCLEAN; 654 } 655 if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & 656 type) { 657 chunk_err(leaf, chunk, logical, 658 "unrecognized chunk type: 0x%llx", 659 ~(BTRFS_BLOCK_GROUP_TYPE_MASK | 660 BTRFS_BLOCK_GROUP_PROFILE_MASK) & 661 btrfs_chunk_type(leaf, chunk)); 662 return -EUCLEAN; 663 } 664 665 if (!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) && 666 (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) { 667 chunk_err(leaf, chunk, logical, 
668 "invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set", 669 type & BTRFS_BLOCK_GROUP_PROFILE_MASK); 670 return -EUCLEAN; 671 } 672 if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) { 673 chunk_err(leaf, chunk, logical, 674 "missing chunk type flag, have 0x%llx one bit must be set in 0x%llx", 675 type, BTRFS_BLOCK_GROUP_TYPE_MASK); 676 return -EUCLEAN; 677 } 678 679 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) && 680 (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) { 681 chunk_err(leaf, chunk, logical, 682 "system chunk with data or metadata type: 0x%llx", 683 type); 684 return -EUCLEAN; 685 } 686 687 features = btrfs_super_incompat_flags(fs_info->super_copy); 688 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 689 mixed = true; 690 691 if (!mixed) { 692 if ((type & BTRFS_BLOCK_GROUP_METADATA) && 693 (type & BTRFS_BLOCK_GROUP_DATA)) { 694 chunk_err(leaf, chunk, logical, 695 "mixed chunk type in non-mixed mode: 0x%llx", type); 696 return -EUCLEAN; 697 } 698 } 699 700 if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) || 701 (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) || 702 (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) || 703 (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) || 704 (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) || 705 ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) { 706 chunk_err(leaf, chunk, logical, 707 "invalid num_stripes:sub_stripes %u:%u for profile %llu", 708 num_stripes, sub_stripes, 709 type & BTRFS_BLOCK_GROUP_PROFILE_MASK); 710 return -EUCLEAN; 711 } 712 713 return 0; 714 } 715 716 __printf(3, 4) 717 __cold 718 static void dev_item_err(const struct extent_buffer *eb, int slot, 719 const char *fmt, ...) 
720 { 721 struct btrfs_key key; 722 struct va_format vaf; 723 va_list args; 724 725 btrfs_item_key_to_cpu(eb, &key, slot); 726 va_start(args, fmt); 727 728 vaf.fmt = fmt; 729 vaf.va = &args; 730 731 btrfs_crit(eb->fs_info, 732 "corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV", 733 btrfs_header_level(eb) == 0 ? "leaf" : "node", 734 btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, 735 key.objectid, &vaf); 736 va_end(args); 737 } 738 739 static int check_dev_item(struct extent_buffer *leaf, 740 struct btrfs_key *key, int slot) 741 { 742 struct btrfs_dev_item *ditem; 743 744 if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) { 745 dev_item_err(leaf, slot, 746 "invalid objectid: has=%llu expect=%llu", 747 key->objectid, BTRFS_DEV_ITEMS_OBJECTID); 748 return -EUCLEAN; 749 } 750 ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); 751 if (btrfs_device_id(leaf, ditem) != key->offset) { 752 dev_item_err(leaf, slot, 753 "devid mismatch: key has=%llu item has=%llu", 754 key->offset, btrfs_device_id(leaf, ditem)); 755 return -EUCLEAN; 756 } 757 758 /* 759 * For device total_bytes, we don't have reliable way to check it, as 760 * it can be 0 for device removal. Device size check can only be done 761 * by dev extents check. 762 */ 763 if (btrfs_device_bytes_used(leaf, ditem) > 764 btrfs_device_total_bytes(leaf, ditem)) { 765 dev_item_err(leaf, slot, 766 "invalid bytes used: have %llu expect [0, %llu]", 767 btrfs_device_bytes_used(leaf, ditem), 768 btrfs_device_total_bytes(leaf, ditem)); 769 return -EUCLEAN; 770 } 771 /* 772 * Remaining members like io_align/type/gen/dev_group aren't really 773 * utilized. Skip them to make later usage of them easier. 774 */ 775 return 0; 776 } 777 778 /* Inode item error output has the same format as dir_item_err() */ 779 #define inode_item_err(fs_info, eb, slot, fmt, ...) 
\ 780 dir_item_err(eb, slot, fmt, __VA_ARGS__) 781 782 static int check_inode_item(struct extent_buffer *leaf, 783 struct btrfs_key *key, int slot) 784 { 785 struct btrfs_fs_info *fs_info = leaf->fs_info; 786 struct btrfs_inode_item *iitem; 787 u64 super_gen = btrfs_super_generation(fs_info->super_copy); 788 u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777); 789 u32 mode; 790 791 if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID || 792 key->objectid > BTRFS_LAST_FREE_OBJECTID) && 793 key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID && 794 key->objectid != BTRFS_FREE_INO_OBJECTID) { 795 generic_err(leaf, slot, 796 "invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu", 797 key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID, 798 BTRFS_FIRST_FREE_OBJECTID, 799 BTRFS_LAST_FREE_OBJECTID, 800 BTRFS_FREE_INO_OBJECTID); 801 return -EUCLEAN; 802 } 803 if (key->offset != 0) { 804 inode_item_err(fs_info, leaf, slot, 805 "invalid key offset: has %llu expect 0", 806 key->offset); 807 return -EUCLEAN; 808 } 809 iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item); 810 811 /* Here we use super block generation + 1 to handle log tree */ 812 if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) { 813 inode_item_err(fs_info, leaf, slot, 814 "invalid inode generation: has %llu expect (0, %llu]", 815 btrfs_inode_generation(leaf, iitem), 816 super_gen + 1); 817 return -EUCLEAN; 818 } 819 /* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */ 820 if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) { 821 inode_item_err(fs_info, leaf, slot, 822 "invalid inode generation: has %llu expect [0, %llu]", 823 btrfs_inode_transid(leaf, iitem), super_gen + 1); 824 return -EUCLEAN; 825 } 826 827 /* 828 * For size and nbytes it's better not to be too strict, as for dir 829 * item its size/nbytes can easily get wrong, but doesn't affect 830 * anything in the fs. So here we skip the check. 
831 */ 832 mode = btrfs_inode_mode(leaf, iitem); 833 if (mode & ~valid_mask) { 834 inode_item_err(fs_info, leaf, slot, 835 "unknown mode bit detected: 0x%x", 836 mode & ~valid_mask); 837 return -EUCLEAN; 838 } 839 840 /* 841 * S_IFMT is not bit mapped so we can't completely rely on 842 * is_power_of_2/has_single_bit_set, but it can save us from checking 843 * FIFO/CHR/DIR/REG. Only needs to check BLK, LNK and SOCKS 844 */ 845 if (!has_single_bit_set(mode & S_IFMT)) { 846 if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) { 847 inode_item_err(fs_info, leaf, slot, 848 "invalid mode: has 0%o expect valid S_IF* bit(s)", 849 mode & S_IFMT); 850 return -EUCLEAN; 851 } 852 } 853 if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) { 854 inode_item_err(fs_info, leaf, slot, 855 "invalid nlink: has %u expect no more than 1 for dir", 856 btrfs_inode_nlink(leaf, iitem)); 857 return -EUCLEAN; 858 } 859 if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) { 860 inode_item_err(fs_info, leaf, slot, 861 "unknown flags detected: 0x%llx", 862 btrfs_inode_flags(leaf, iitem) & 863 ~BTRFS_INODE_FLAG_MASK); 864 return -EUCLEAN; 865 } 866 return 0; 867 } 868 869 static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key, 870 int slot) 871 { 872 struct btrfs_fs_info *fs_info = leaf->fs_info; 873 struct btrfs_root_item ri; 874 const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY | 875 BTRFS_ROOT_SUBVOL_DEAD; 876 877 /* No such tree id */ 878 if (key->objectid == 0) { 879 generic_err(leaf, slot, "invalid root id 0"); 880 return -EUCLEAN; 881 } 882 883 /* 884 * Some older kernel may create ROOT_ITEM with non-zero offset, so here 885 * we only check offset for reloc tree whose key->offset must be a 886 * valid tree. 
887 */ 888 if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) { 889 generic_err(leaf, slot, "invalid root id 0 for reloc tree"); 890 return -EUCLEAN; 891 } 892 893 if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) { 894 generic_err(leaf, slot, 895 "invalid root item size, have %u expect %zu", 896 btrfs_item_size_nr(leaf, slot), sizeof(ri)); 897 } 898 899 read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot), 900 sizeof(ri)); 901 902 /* Generation related */ 903 if (btrfs_root_generation(&ri) > 904 btrfs_super_generation(fs_info->super_copy) + 1) { 905 generic_err(leaf, slot, 906 "invalid root generation, have %llu expect (0, %llu]", 907 btrfs_root_generation(&ri), 908 btrfs_super_generation(fs_info->super_copy) + 1); 909 return -EUCLEAN; 910 } 911 if (btrfs_root_generation_v2(&ri) > 912 btrfs_super_generation(fs_info->super_copy) + 1) { 913 generic_err(leaf, slot, 914 "invalid root v2 generation, have %llu expect (0, %llu]", 915 btrfs_root_generation_v2(&ri), 916 btrfs_super_generation(fs_info->super_copy) + 1); 917 return -EUCLEAN; 918 } 919 if (btrfs_root_last_snapshot(&ri) > 920 btrfs_super_generation(fs_info->super_copy) + 1) { 921 generic_err(leaf, slot, 922 "invalid root last_snapshot, have %llu expect (0, %llu]", 923 btrfs_root_last_snapshot(&ri), 924 btrfs_super_generation(fs_info->super_copy) + 1); 925 return -EUCLEAN; 926 } 927 928 /* Alignment and level check */ 929 if (!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize)) { 930 generic_err(leaf, slot, 931 "invalid root bytenr, have %llu expect to be aligned to %u", 932 btrfs_root_bytenr(&ri), fs_info->sectorsize); 933 return -EUCLEAN; 934 } 935 if (btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL) { 936 generic_err(leaf, slot, 937 "invalid root level, have %u expect [0, %u]", 938 btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1); 939 return -EUCLEAN; 940 } 941 if (ri.drop_level >= BTRFS_MAX_LEVEL) { 942 generic_err(leaf, slot, 943 "invalid root level, have %u expect [0, %u]", 944 
				    ri.drop_level, BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}

	/* Flags check: only bits inside valid_root_flags may be set */
	if (btrfs_root_flags(&ri) & ~valid_root_flags) {
		generic_err(leaf, slot,
			    "invalid root flags, have 0x%llx expect mask 0x%llx",
			    btrfs_root_flags(&ri), valid_root_flags);
		return -EUCLEAN;
	}
	return 0;
}

/*
 * Customized reporter for extent items.  The key objectid is the extent
 * bytenr, so decode and print bytenr/len along with the caller message.
 */
__printf(3,4)
__cold
static void extent_err(const struct extent_buffer *eb, int slot,
		       const char *fmt, ...)
{
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;
	u64 bytenr;
	u64 len;

	btrfs_item_key_to_cpu(eb, &key, slot);
	bytenr = key.objectid;
	/*
	 * For metadata items and tree block backrefs key.offset is not a
	 * length (it is a level or a parent bytenr), so the extent length
	 * is always one nodesize.  Otherwise key.offset is the length.
	 */
	if (key.type == BTRFS_METADATA_ITEM_KEY ||
	    key.type == BTRFS_TREE_BLOCK_REF_KEY ||
	    key.type == BTRFS_SHARED_BLOCK_REF_KEY)
		len = eb->fs_info->nodesize;
	else
		len = key.offset;
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(eb->fs_info,
	"corrupt %s: block=%llu slot=%d extent bytenr=%llu len=%llu %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		eb->start, slot, bytenr, len, &vaf);
	va_end(args);
}

/*
 * Validate an EXTENT_ITEM/METADATA_ITEM including all of its inline
 * backrefs.  Returns 0 if valid, -EUCLEAN if any corruption is found.
 */
static int check_extent_item(struct extent_buffer *leaf,
			     struct btrfs_key *key, int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_extent_item *ei;
	bool is_tree_block = false;
	unsigned long ptr;	/* Current pointer inside inline refs */
	unsigned long end;	/* Extent item end */
	const u32 item_size = btrfs_item_size_nr(leaf, slot);
	u64 flags;
	u64 generation;
	u64 total_refs;		/* Total refs in btrfs_extent_item */
	u64 inline_refs = 0;	/* found total inline refs */

	if (key->type == BTRFS_METADATA_ITEM_KEY &&
	    !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		generic_err(leaf, slot,
"invalid key type, METADATA_ITEM type invalid when SKINNY_METADATA feature disabled");
		return -EUCLEAN;
	}
	/* key->objectid is the bytenr for both key types */
	if (!IS_ALIGNED(key->objectid, fs_info->sectorsize)) {
		generic_err(leaf, slot,
		"invalid key objectid, have %llu expect to be aligned to %u",
			   key->objectid, fs_info->sectorsize);
		return -EUCLEAN;
	}

	/* key->offset is tree level for METADATA_ITEM_KEY */
	if (key->type == BTRFS_METADATA_ITEM_KEY &&
	    key->offset >= BTRFS_MAX_LEVEL) {
		extent_err(leaf, slot,
			   "invalid tree level, have %llu expect [0, %u]",
			   key->offset, BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}

	/*
	 * EXTENT/METADATA_ITEM consists of:
	 * 1) One btrfs_extent_item
	 *    Records the total refs, type and generation of the extent.
	 *
	 * 2) One btrfs_tree_block_info (for EXTENT_ITEM and tree backref only)
	 *    Records the first key and level of the tree block.
	 *
	 * 3) Zero or more btrfs_extent_inline_ref(s)
	 *    Each inline ref has one btrfs_extent_inline_ref shows:
	 *    3.1) The ref type, one of the 4
	 *         TREE_BLOCK_REF	Tree block only
	 *         SHARED_BLOCK_REF	Tree block only
	 *         EXTENT_DATA_REF	Data only
	 *         SHARED_DATA_REF	Data only
	 *    3.2) Ref type specific data
	 *         Either using btrfs_extent_inline_ref::offset, or specific
	 *         data structure.
	 */
	if (item_size < sizeof(*ei)) {
		extent_err(leaf, slot,
			   "invalid item size, have %u expect [%zu, %u)",
			   item_size, sizeof(*ei),
			   BTRFS_LEAF_DATA_SIZE(fs_info));
		return -EUCLEAN;
	}
	end = item_size + btrfs_item_ptr_offset(leaf, slot);

	/* Checks against extent_item */
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	total_refs = btrfs_extent_refs(leaf, ei);
	generation = btrfs_extent_generation(leaf, ei);
	if (generation > btrfs_super_generation(fs_info->super_copy) + 1) {
		extent_err(leaf, slot,
			   "invalid generation, have %llu expect (0, %llu]",
			   generation,
			   btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}
	/* Exactly one of the DATA/TREE_BLOCK flags must be set */
	if (!has_single_bit_set(flags & (BTRFS_EXTENT_FLAG_DATA |
					 BTRFS_EXTENT_FLAG_TREE_BLOCK))) {
		extent_err(leaf, slot,
		"invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx",
			   flags, BTRFS_EXTENT_FLAG_DATA |
			   BTRFS_EXTENT_FLAG_TREE_BLOCK);
		return -EUCLEAN;
	}
	is_tree_block = !!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK);
	if (is_tree_block) {
		/* Non-skinny tree block keys must cover exactly one node */
		if (key->type == BTRFS_EXTENT_ITEM_KEY &&
		    key->offset != fs_info->nodesize) {
			extent_err(leaf, slot,
				   "invalid extent length, have %llu expect %u",
				   key->offset, fs_info->nodesize);
			return -EUCLEAN;
		}
	} else {
		/* Data extents never use the skinny METADATA_ITEM key */
		if (key->type != BTRFS_EXTENT_ITEM_KEY) {
			extent_err(leaf, slot,
			"invalid key type, have %u expect %u for data backref",
				   key->type, BTRFS_EXTENT_ITEM_KEY);
			return -EUCLEAN;
		}
		if (!IS_ALIGNED(key->offset, fs_info->sectorsize)) {
			extent_err(leaf, slot,
			"invalid extent length, have %llu expect aligned to %u",
				   key->offset, fs_info->sectorsize);
			return -EUCLEAN;
		}
	}
	ptr = (unsigned long)(struct btrfs_extent_item *)(ei + 1);

	/* Check the special case of btrfs_tree_block_info */
	if (is_tree_block && key->type != BTRFS_METADATA_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		if (btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL) {
			extent_err(leaf, slot,
			"invalid tree block info level, have %u expect [0, %u]",
				   btrfs_tree_block_level(leaf, info),
				   BTRFS_MAX_LEVEL - 1);
			return -EUCLEAN;
		}
		ptr = (unsigned long)(struct btrfs_tree_block_info *)(info + 1);
	}

	/* Check inline refs */
	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		struct btrfs_extent_data_ref *dref;
		struct btrfs_shared_data_ref *sref;
		u64 dref_offset;
		u64 inline_offset;
		u8 inline_type;

		/* The fixed header of the inline ref must fit */
		if (ptr + sizeof(*iref) > end) {
			extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %zu end %lu",
				   ptr, sizeof(*iref), end);
			return -EUCLEAN;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		inline_type = btrfs_extent_inline_ref_type(leaf, iref);
		inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
		/*
		 * The type-specific payload must also fit.
		 * NOTE(review): the message prints the ref type under the
		 * "iref size" label; preserved to keep the log format stable.
		 */
		if (ptr + btrfs_extent_inline_ref_size(inline_type) > end) {
			extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %u end %lu",
				   ptr, inline_type, end);
			return -EUCLEAN;
		}

		switch (inline_type) {
		/* inline_offset is subvolid of the owner, no need to check */
		case BTRFS_TREE_BLOCK_REF_KEY:
			inline_refs++;
			break;
		/* Contains parent bytenr */
		case BTRFS_SHARED_BLOCK_REF_KEY:
			if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
				extent_err(leaf, slot,
		"invalid tree parent bytenr, have %llu expect aligned to %u",
					   inline_offset, fs_info->sectorsize);
				return -EUCLEAN;
			}
			inline_refs++;
			break;
		/*
		 * Contains owner subvolid, owner key objectid, adjusted offset.
		 * The only obvious corruption can happen in that offset.
		 */
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
			if (!IS_ALIGNED(dref_offset, fs_info->sectorsize)) {
				extent_err(leaf, slot,
		"invalid data ref offset, have %llu expect aligned to %u",
					   dref_offset, fs_info->sectorsize);
				return -EUCLEAN;
			}
			inline_refs += btrfs_extent_data_ref_count(leaf, dref);
			break;
		/* Contains parent bytenr and ref count */
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = (struct btrfs_shared_data_ref *)(iref + 1);
			if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
				extent_err(leaf, slot,
		"invalid data parent bytenr, have %llu expect aligned to %u",
					   inline_offset, fs_info->sectorsize);
				return -EUCLEAN;
			}
			inline_refs += btrfs_shared_data_ref_count(leaf, sref);
			break;
		default:
			extent_err(leaf, slot, "unknown inline ref type: %u",
				   inline_type);
			return -EUCLEAN;
		}
		ptr += btrfs_extent_inline_ref_size(inline_type);
	}
	/* No padding is allowed */
	if (ptr != end) {
		extent_err(leaf, slot,
			   "invalid extent item size, padding bytes found");
		return -EUCLEAN;
	}

	/* Finally, check the inline refs against total refs */
	if (inline_refs > total_refs) {
		extent_err(leaf, slot,
			"invalid extent refs, have %llu expect >= inline %llu",
			   total_refs, inline_refs);
		return -EUCLEAN;
	}
	return 0;
}

static int
check_simple_keyed_refs(struct extent_buffer *leaf, 1205 struct btrfs_key *key, int slot) 1206 { 1207 u32 expect_item_size = 0; 1208 1209 if (key->type == BTRFS_SHARED_DATA_REF_KEY) 1210 expect_item_size = sizeof(struct btrfs_shared_data_ref); 1211 1212 if (btrfs_item_size_nr(leaf, slot) != expect_item_size) { 1213 generic_err(leaf, slot, 1214 "invalid item size, have %u expect %u for key type %u", 1215 btrfs_item_size_nr(leaf, slot), 1216 expect_item_size, key->type); 1217 return -EUCLEAN; 1218 } 1219 if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) { 1220 generic_err(leaf, slot, 1221 "invalid key objectid for shared block ref, have %llu expect aligned to %u", 1222 key->objectid, leaf->fs_info->sectorsize); 1223 return -EUCLEAN; 1224 } 1225 if (key->type != BTRFS_TREE_BLOCK_REF_KEY && 1226 !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize)) { 1227 extent_err(leaf, slot, 1228 "invalid tree parent bytenr, have %llu expect aligned to %u", 1229 key->offset, leaf->fs_info->sectorsize); 1230 return -EUCLEAN; 1231 } 1232 return 0; 1233 } 1234 1235 static int check_extent_data_ref(struct extent_buffer *leaf, 1236 struct btrfs_key *key, int slot) 1237 { 1238 struct btrfs_extent_data_ref *dref; 1239 unsigned long ptr = btrfs_item_ptr_offset(leaf, slot); 1240 const unsigned long end = ptr + btrfs_item_size_nr(leaf, slot); 1241 1242 if (btrfs_item_size_nr(leaf, slot) % sizeof(*dref) != 0) { 1243 generic_err(leaf, slot, 1244 "invalid item size, have %u expect aligned to %zu for key type %u", 1245 btrfs_item_size_nr(leaf, slot), 1246 sizeof(*dref), key->type); 1247 } 1248 if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) { 1249 generic_err(leaf, slot, 1250 "invalid key objectid for shared block ref, have %llu expect aligned to %u", 1251 key->objectid, leaf->fs_info->sectorsize); 1252 return -EUCLEAN; 1253 } 1254 for (; ptr < end; ptr += sizeof(*dref)) { 1255 u64 root_objectid; 1256 u64 owner; 1257 u64 offset; 1258 u64 hash; 1259 1260 dref = (struct 
btrfs_extent_data_ref *)ptr; 1261 root_objectid = btrfs_extent_data_ref_root(leaf, dref); 1262 owner = btrfs_extent_data_ref_objectid(leaf, dref); 1263 offset = btrfs_extent_data_ref_offset(leaf, dref); 1264 hash = hash_extent_data_ref(root_objectid, owner, offset); 1265 if (hash != key->offset) { 1266 extent_err(leaf, slot, 1267 "invalid extent data ref hash, item has 0x%016llx key has 0x%016llx", 1268 hash, key->offset); 1269 return -EUCLEAN; 1270 } 1271 if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) { 1272 extent_err(leaf, slot, 1273 "invalid extent data backref offset, have %llu expect aligned to %u", 1274 offset, leaf->fs_info->sectorsize); 1275 } 1276 } 1277 return 0; 1278 } 1279 1280 #define inode_ref_err(fs_info, eb, slot, fmt, args...) \ 1281 inode_item_err(fs_info, eb, slot, fmt, ##args) 1282 static int check_inode_ref(struct extent_buffer *leaf, 1283 struct btrfs_key *key, struct btrfs_key *prev_key, 1284 int slot) 1285 { 1286 struct btrfs_inode_ref *iref; 1287 unsigned long ptr; 1288 unsigned long end; 1289 1290 if (!check_prev_ino(leaf, key, slot, prev_key)) 1291 return -EUCLEAN; 1292 /* namelen can't be 0, so item_size == sizeof() is also invalid */ 1293 if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) { 1294 inode_ref_err(fs_info, leaf, slot, 1295 "invalid item size, have %u expect (%zu, %u)", 1296 btrfs_item_size_nr(leaf, slot), 1297 sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info)); 1298 return -EUCLEAN; 1299 } 1300 1301 ptr = btrfs_item_ptr_offset(leaf, slot); 1302 end = ptr + btrfs_item_size_nr(leaf, slot); 1303 while (ptr < end) { 1304 u16 namelen; 1305 1306 if (ptr + sizeof(iref) > end) { 1307 inode_ref_err(fs_info, leaf, slot, 1308 "inode ref overflow, ptr %lu end %lu inode_ref_size %zu", 1309 ptr, end, sizeof(iref)); 1310 return -EUCLEAN; 1311 } 1312 1313 iref = (struct btrfs_inode_ref *)ptr; 1314 namelen = btrfs_inode_ref_name_len(leaf, iref); 1315 if (ptr + sizeof(*iref) + namelen > end) { 1316 inode_ref_err(fs_info, leaf, slot, 
				"inode ref overflow, ptr %lu end %lu namelen %u",
				ptr, end, namelen);
			return -EUCLEAN;
		}

		/*
		 * NOTE: In theory we should record all found index numbers
		 * to find any duplicated indexes, but that will be too time
		 * consuming for inodes with too many hard links.
		 */
		ptr += sizeof(*iref) + namelen;
	}
	return 0;
}

/*
 * Common point to switch the item-specific validation.
 * Dispatches on key->type; key types without a checker here are accepted
 * unchanged (ret stays 0).
 */
static int check_leaf_item(struct extent_buffer *leaf,
			   struct btrfs_key *key, int slot,
			   struct btrfs_key *prev_key)
{
	int ret = 0;
	struct btrfs_chunk *chunk;

	switch (key->type) {
	case BTRFS_EXTENT_DATA_KEY:
		ret = check_extent_data_item(leaf, key, slot, prev_key);
		break;
	case BTRFS_EXTENT_CSUM_KEY:
		ret = check_csum_item(leaf, key, slot);
		break;
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
	case BTRFS_XATTR_ITEM_KEY:
		ret = check_dir_item(leaf, key, prev_key, slot);
		break;
	case BTRFS_INODE_REF_KEY:
		ret = check_inode_ref(leaf, key, prev_key, slot);
		break;
	case BTRFS_BLOCK_GROUP_ITEM_KEY:
		ret = check_block_group_item(leaf, key, slot);
		break;
	case BTRFS_CHUNK_ITEM_KEY:
		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		ret = btrfs_check_chunk_valid(leaf, chunk, key->offset);
		break;
	case BTRFS_DEV_ITEM_KEY:
		ret = check_dev_item(leaf, key, slot);
		break;
	case BTRFS_INODE_ITEM_KEY:
		ret = check_inode_item(leaf, key, slot);
		break;
	case BTRFS_ROOT_ITEM_KEY:
		ret = check_root_item(leaf, key, slot);
		break;
	case BTRFS_EXTENT_ITEM_KEY:
	case BTRFS_METADATA_ITEM_KEY:
		ret = check_extent_item(leaf, key, slot);
		break;
	case BTRFS_TREE_BLOCK_REF_KEY:
	case BTRFS_SHARED_DATA_REF_KEY:
	case BTRFS_SHARED_BLOCK_REF_KEY:
		ret = check_simple_keyed_refs(leaf, key, slot);
		break;
	case BTRFS_EXTENT_DATA_REF_KEY:
		ret = check_extent_data_ref(leaf, key, slot);
		break;
	}
	return ret;
}

/*
 * Validate a leaf: level, emptiness, key ordering, item offsets/sizes,
 * and (when @check_item_data is true) per-item content.
 * Returns 0 if the leaf is valid, -EUCLEAN otherwise.
 */
static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	/* No valid key type is 0, so all key should be larger than this key */
	struct btrfs_key prev_key = {0, 0, 0};
	struct btrfs_key key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	if (btrfs_header_level(leaf) != 0) {
		generic_err(leaf, 0,
			"invalid level for leaf, have %d expect 0",
			btrfs_header_level(leaf));
		return -EUCLEAN;
	}

	/*
	 * Extent buffers from a relocation tree have a owner field that
	 * corresponds to the subvolume tree they are based on. So just from an
	 * extent buffer alone we can not find out what is the id of the
	 * corresponding subvolume tree, so we can not figure out if the extent
	 * buffer corresponds to the root of the relocation tree or not. So
	 * skip this check for relocation trees.
	 */
	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
		u64 owner = btrfs_header_owner(leaf);

		/* These trees must never be empty */
		if (owner == BTRFS_ROOT_TREE_OBJECTID ||
		    owner == BTRFS_CHUNK_TREE_OBJECTID ||
		    owner == BTRFS_EXTENT_TREE_OBJECTID ||
		    owner == BTRFS_DEV_TREE_OBJECTID ||
		    owner == BTRFS_FS_TREE_OBJECTID ||
		    owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
			generic_err(leaf, 0,
			"invalid root, root %llu must never be empty",
				    owner);
			return -EUCLEAN;
		}
		/* Unknown tree */
		if (owner == 0) {
			generic_err(leaf, 0,
				"invalid owner, root 0 is not defined");
			return -EUCLEAN;
		}
		return 0;
	}

	/* An empty relocation-tree leaf is acceptable; nothing to check */
	if (nritems == 0)
		return 0;

	/*
	 * Check the following things to make sure this is a good leaf, and
	 * leaf users won't need to bother with similar sanity checks:
	 *
	 * 1) key ordering
	 * 2) item offset and size
	 *	No overlap, no hole, all inside the leaf.
	 * 3) item content
	 *	If possible, do comprehensive sanity check.
	 *	NOTE: All checks must only rely on the item data itself.
	 */
	for (slot = 0; slot < nritems; slot++) {
		u32 item_end_expected;
		int ret;

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
			generic_err(leaf, slot,
	"bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
				prev_key.objectid, prev_key.type,
				prev_key.offset, key.objectid, key.type,
				key.offset);
			return -EUCLEAN;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards the
		 * front.
		 */
		if (slot == 0)
			item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info);
		else
			item_end_expected = btrfs_item_offset_nr(leaf,
								 slot - 1);
		if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
			generic_err(leaf, slot,
				"unexpected item end, have %u expect %u",
				btrfs_item_end_nr(leaf, slot),
				item_end_expected);
			return -EUCLEAN;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent to each other, but
		 * all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(fs_info)) {
			generic_err(leaf, slot,
			"slot end outside of leaf, have %u expect range [0, %u]",
				btrfs_item_end_nr(leaf, slot),
				BTRFS_LEAF_DATA_SIZE(fs_info));
			return -EUCLEAN;
		}

		/* Also check if the item pointer overlaps with btrfs item. */
		if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
		    btrfs_item_ptr_offset(leaf, slot)) {
			generic_err(leaf, slot,
		"slot overlaps with its data, item end %lu data start %lu",
				btrfs_item_nr_offset(slot) +
				sizeof(struct btrfs_item),
				btrfs_item_ptr_offset(leaf, slot));
			return -EUCLEAN;
		}

		if (check_item_data) {
			/*
			 * Check if the item size and content meet other
			 * criteria
			 */
			ret = check_leaf_item(leaf, &key, slot, &prev_key);
			if (ret < 0)
				return ret;
		}

		prev_key.objectid = key.objectid;
		prev_key.type = key.type;
		prev_key.offset = key.offset;
	}

	return 0;
}

/* Full leaf check: structure plus per-item content validation. */
int btrfs_check_leaf_full(struct extent_buffer *leaf)
{
	return check_leaf(leaf, true);
}
ALLOW_ERROR_INJECTION(btrfs_check_leaf_full, ERRNO);

/* Relaxed leaf check: structure only, item contents are not validated. */
int btrfs_check_leaf_relaxed(struct extent_buffer *leaf)
{
	return check_leaf(leaf, false);
}

/*
 * Validate a node (non-leaf) block: level range, nritems range, non-NULL
 * aligned child pointers, and key ordering between adjacent slots.
 */
int btrfs_check_node(struct extent_buffer *node)
{
	struct btrfs_fs_info *fs_info = node->fs_info;
	unsigned long nr = btrfs_header_nritems(node);
	struct btrfs_key key, next_key;
	int slot;
	int level = btrfs_header_level(node);
	u64 bytenr;
	int ret = 0;

	if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
		generic_err(node, 0,
			"invalid level for node, have %d expect [1, %d]",
			level, BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}
	if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) {
		btrfs_crit(fs_info,
"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
			   btrfs_header_owner(node), node->start,
			   nr == 0 ? "small" : "large", nr,
			   BTRFS_NODEPTRS_PER_BLOCK(fs_info));
		return -EUCLEAN;
	}

	for (slot = 0; slot < nr - 1; slot++) {
		bytenr = btrfs_node_blockptr(node, slot);
		btrfs_node_key_to_cpu(node, &key, slot);
		btrfs_node_key_to_cpu(node, &next_key, slot + 1);

		if (!bytenr) {
			generic_err(node, slot,
				"invalid NULL node pointer");
			ret = -EUCLEAN;
			goto out;
		}
		if (!IS_ALIGNED(bytenr, fs_info->sectorsize)) {
			generic_err(node, slot,
			"unaligned pointer, have %llu should be aligned to %u",
				bytenr, fs_info->sectorsize);
			ret = -EUCLEAN;
			goto out;
		}

		if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
			generic_err(node, slot,
			"bad key order, current (%llu %u %llu) next (%llu %u %llu)",
				key.objectid, key.type, key.offset,
				next_key.objectid, next_key.type,
				next_key.offset);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_check_node, ERRNO);