1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) STRATO AG 2011. All rights reserved. 4 */ 5 6 /* 7 * This module can be used to catch cases when the btrfs kernel 8 * code executes write requests to the disk that bring the file 9 * system in an inconsistent state. In such a state, a power-loss 10 * or kernel panic event would cause that the data on disk is 11 * lost or at least damaged. 12 * 13 * Code is added that examines all block write requests during 14 * runtime (including writes of the super block). Three rules 15 * are verified and an error is printed on violation of the 16 * rules: 17 * 1. It is not allowed to write a disk block which is 18 * currently referenced by the super block (either directly 19 * or indirectly). 20 * 2. When a super block is written, it is verified that all 21 * referenced (directly or indirectly) blocks fulfill the 22 * following requirements: 23 * 2a. All referenced blocks have either been present when 24 * the file system was mounted, (i.e., they have been 25 * referenced by the super block) or they have been 26 * written since then and the write completion callback 27 * was called and no write error was indicated and a 28 * FLUSH request to the device where these blocks are 29 * located was received and completed. 30 * 2b. All referenced blocks need to have a generation 31 * number which is equal to the parent's number. 32 * 33 * One issue that was found using this module was that the log 34 * tree on disk became temporarily corrupted because disk blocks 35 * that had been in use for the log tree had been freed and 36 * reused too early, while being referenced by the written super 37 * block. 38 * 39 * The search term in the kernel log that can be used to filter 40 * on the existence of detected integrity issues is 41 * "btrfs: attempt". 42 * 43 * The integrity check is enabled via mount options. These 44 * mount options are only supported if the integrity check 45 * tool is compiled by defining BTRFS_FS_CHECK_INTEGRITY. 46 * 47 * Example #1, apply integrity checks to all metadata: 48 * mount /dev/sdb1 /mnt -o check_int 49 * 50 * Example #2, apply integrity checks to all metadata and 51 * to data extents: 52 * mount /dev/sdb1 /mnt -o check_int_data 53 * 54 * Example #3, apply integrity checks to all metadata and dump 55 * the tree that the super block references to kernel messages 56 * each time after a super block was written: 57 * mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263 58 * 59 * If the integrity check tool is included and activated in 60 * the mount options, plenty of kernel memory is used, and 61 * plenty of additional CPU cycles are spent. Enabling this 62 * functionality is not intended for normal use. In most 63 * cases, unless you are a btrfs developer who needs to verify 64 * the integrity of (super)-block write requests, do not 65 * enable the config option BTRFS_FS_CHECK_INTEGRITY to 66 * include and compile the integrity check tool. 67 * 68 * Expect millions of lines of information in the kernel log with an 69 * enabled check_int_print_mask. Therefore set LOG_BUF_SHIFT in the 70 * kernel config to at least 26 (which is 64MB). Usually the value is 71 * limited to 21 (which is 2MB) in init/Kconfig. The file needs to be 72 * changed like this before LOG_BUF_SHIFT can be set to a high value: 73 * config LOG_BUF_SHIFT 74 * int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" 75 * range 12 30 76 */ 77 78 #include <linux/sched.h> 79 #include <linux/slab.h> 80 #include <linux/mutex.h> 81 #include <linux/blkdev.h> 82 #include <linux/mm.h> 83 #include <linux/string.h> 84 #include <crypto/hash.h> 85 #include "messages.h" 86 #include "ctree.h" 87 #include "disk-io.h" 88 #include "transaction.h" 89 #include "extent_io.h" 90 #include "volumes.h" 91 #include "print-tree.h" 92 #include "locking.h" 93 #include "check-integrity.h" 94 #include "rcu-string.h" 95 #include "compression.h" 96 #include "accessors.h" 97 98 #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 99 #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 100 #define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100 101 #define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051 102 #define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807 103 #define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530 104 #define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300 105 #define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6) /* in characters, 106 * excluding " [...]" */ 107 #define BTRFSIC_GENERATION_UNKNOWN ((u64)-1) 108 109 /* 110 * The definition of the bitmask fields for the print_mask. 111 * They are specified with the mount option check_integrity_print_mask. 112 */ 113 #define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE 0x00000001 114 #define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION 0x00000002 115 #define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE 0x00000004 116 #define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE 0x00000008 117 #define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH 0x00000010 118 #define BTRFSIC_PRINT_MASK_END_IO_BIO_BH 0x00000020 119 #define BTRFSIC_PRINT_MASK_VERBOSE 0x00000040 120 #define BTRFSIC_PRINT_MASK_VERY_VERBOSE 0x00000080 121 #define BTRFSIC_PRINT_MASK_INITIAL_TREE 0x00000100 122 #define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES 0x00000200 123 #define BTRFSIC_PRINT_MASK_INITIAL_DATABASE 0x00000400 124 #define BTRFSIC_PRINT_MASK_NUM_COPIES 0x00000800 125 #define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS 0x00001000 126 #define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE 0x00002000 127 128 struct btrfsic_dev_state; 129 struct btrfsic_state; 130 131 struct btrfsic_block { 132 u32 magic_num; /* only used for debug purposes */ 133 unsigned int is_metadata:1; /* if it is meta-data, not data-data */ 134 unsigned int is_superblock:1; /* if it is one of the superblocks */ 135 unsigned int is_iodone:1; /* if is done by lower subsystem */ 136 unsigned int iodone_w_error:1; /* error was indicated to endio */ 137 unsigned int never_written:1; /* block was added because it was 138 * referenced, not because it was 139 * written */ 140 unsigned int mirror_num; /* large enough to hold 141 * BTRFS_SUPER_MIRROR_MAX */ 142 struct btrfsic_dev_state *dev_state; 143 u64 dev_bytenr; /* key, physical byte num on disk */ 144 u64 logical_bytenr; /* logical byte num on disk */ 145 u64 generation; 146 struct btrfs_disk_key disk_key; /* extra info to print in case of 147 * issues, will not always be correct */ 148 struct list_head collision_resolving_node; /* list node */ 149 struct list_head all_blocks_node; /* list node */ 150 151 /* the following two lists contain block_link items */ 152 struct list_head ref_to_list; /* list */ 153 struct list_head ref_from_list; /* list */ 154 struct btrfsic_block *next_in_same_bio; 155 void *orig_bio_private; 156 bio_end_io_t *orig_bio_end_io; 157 blk_opf_t submit_bio_bh_rw; 158 u64 flush_gen; /* only valid if !never_written */ 159 }; 160 161 /* 162 * Elements of this type are allocated dynamically and required because 163 * each block object can refer to and can be ref from multiple blocks. 164 * The key to lookup them in the hashtable is the dev_bytenr of 165 * the block ref to plus the one from the block referred from. 166 * The fact that they are searchable via a hashtable and that a 167 * ref_cnt is maintained is not required for the btrfs integrity 168 * check algorithm itself, it is only used to make the output more 169 * beautiful in case that an error is detected (an error is defined 170 * as a write operation to a block while that block is still referenced). 171 */ 172 struct btrfsic_block_link { 173 u32 magic_num; /* only used for debug purposes */ 174 u32 ref_cnt; 175 struct list_head node_ref_to; /* list node */ 176 struct list_head node_ref_from; /* list node */ 177 struct list_head collision_resolving_node; /* list node */ 178 struct btrfsic_block *block_ref_to; 179 struct btrfsic_block *block_ref_from; 180 u64 parent_generation; 181 }; 182 183 struct btrfsic_dev_state { 184 u32 magic_num; /* only used for debug purposes */ 185 struct block_device *bdev; 186 struct btrfsic_state *state; 187 struct list_head collision_resolving_node; /* list node */ 188 struct btrfsic_block dummy_block_for_bio_bh_flush; 189 u64 last_flush_gen; 190 }; 191 192 struct btrfsic_block_hashtable { 193 struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE]; 194 }; 195 196 struct btrfsic_block_link_hashtable { 197 struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE]; 198 }; 199 200 struct btrfsic_dev_state_hashtable { 201 struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE]; 202 }; 203 204 struct btrfsic_block_data_ctx { 205 u64 start; /* virtual bytenr */ 206 u64 dev_bytenr; /* physical bytenr on device */ 207 u32 len; 208 struct btrfsic_dev_state *dev; 209 char **datav; 210 struct page **pagev; 211 void *mem_to_free; 212 }; 213 214 /* This structure is used to implement recursion without occupying 215 * any stack space, refer to btrfsic_process_metablock() */ 216 struct btrfsic_stack_frame { 217 u32 magic; 218 u32 nr; 219 int error; 220 int i; 221 int limit_nesting; 222 int num_copies; 223 int mirror_num; 224 struct btrfsic_block *block; 225 struct btrfsic_block_data_ctx *block_ctx; 226 struct btrfsic_block *next_block; 227 struct btrfsic_block_data_ctx next_block_ctx; 228 struct btrfs_header *hdr; 229 struct btrfsic_stack_frame *prev; 230 }; 231 232 /* Some state per mounted filesystem */ 233 struct btrfsic_state { 234 u32 print_mask; 235 int include_extent_data; 236 struct list_head all_blocks_list; 237 struct btrfsic_block_hashtable block_hashtable; 238 struct btrfsic_block_link_hashtable block_link_hashtable; 239 struct btrfs_fs_info *fs_info; 240 u64 max_superblock_generation; 241 struct btrfsic_block *latest_superblock; 242 u32 metablock_size; 243 u32 datablock_size; 244 }; 245 246 static int btrfsic_process_metablock(struct btrfsic_state *state, 247 struct btrfsic_block *block, 248 struct btrfsic_block_data_ctx *block_ctx, 249 int limit_nesting, int force_iodone_flag); 250 static void btrfsic_read_from_block_data( 251 struct btrfsic_block_data_ctx *block_ctx, 252 void *dst, u32 offset, size_t len); 253 static int btrfsic_create_link_to_next_block( 254 struct btrfsic_state *state, 255 struct btrfsic_block *block, 256 struct btrfsic_block_data_ctx 257 *block_ctx, u64 next_bytenr, 258 int limit_nesting, 259 struct btrfsic_block_data_ctx *next_block_ctx, 260 struct btrfsic_block **next_blockp, 261 int force_iodone_flag, 262 int *num_copiesp, int *mirror_nump, 263 struct btrfs_disk_key *disk_key, 264 u64 parent_generation); 265 static int btrfsic_handle_extent_data(struct btrfsic_state *state, 266 struct btrfsic_block *block, 267 struct btrfsic_block_data_ctx *block_ctx, 268 u32 item_offset, int force_iodone_flag); 269 static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len, 270 struct btrfsic_block_data_ctx *block_ctx_out, 271 int mirror_num); 272 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx); 273 static int btrfsic_read_block(struct btrfsic_state *state, 274 struct btrfsic_block_data_ctx *block_ctx); 275 static int btrfsic_process_written_superblock( 276 struct btrfsic_state *state, 277 struct btrfsic_block *const block, 278 struct btrfs_super_block *const super_hdr); 279 static void btrfsic_bio_end_io(struct bio *bp); 280 static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state, 281 const struct btrfsic_block *block, 282 int recursion_level); 283 static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state, 284 struct btrfsic_block *const block, 285 int recursion_level); 286 static void btrfsic_print_add_link(const struct btrfsic_state *state, 287 const struct btrfsic_block_link *l); 288 static void btrfsic_print_rem_link(const struct btrfsic_state *state, 289 const struct btrfsic_block_link *l); 290 static char btrfsic_get_block_type(const struct btrfsic_state *state, 291 const struct btrfsic_block *block); 292 static void btrfsic_dump_tree(const struct btrfsic_state *state); 293 static void btrfsic_dump_tree_sub(const struct btrfsic_state *state, 294 const struct btrfsic_block *block, 295 int indent_level); 296 static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add( 297 struct btrfsic_state *state, 298 struct btrfsic_block_data_ctx *next_block_ctx, 299 struct btrfsic_block *next_block, 300 struct btrfsic_block *from_block, 301 u64 parent_generation); 302 static struct btrfsic_block *btrfsic_block_lookup_or_add( 303 struct btrfsic_state *state, 304 struct btrfsic_block_data_ctx *block_ctx, 305 const char *additional_string, 306 int is_metadata, 307 int is_iodone, 308 int never_written, 309 int mirror_num, 310 int *was_created); 311 static int btrfsic_process_superblock_dev_mirror( 312 struct btrfsic_state *state, 313 struct btrfsic_dev_state *dev_state, 314 struct btrfs_device *device, 315 int superblock_mirror_num, 316 struct btrfsic_dev_state **selected_dev_state, 317 struct btrfs_super_block *selected_super); 318 static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t dev); 319 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state, 320 u64 bytenr, 321 struct btrfsic_dev_state *dev_state, 322 u64 dev_bytenr); 323 324 static struct mutex btrfsic_mutex; 325 static int btrfsic_is_initialized; 326 static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable; 327 328 329 static void btrfsic_block_init(struct btrfsic_block *b) 330 { 331 b->magic_num = BTRFSIC_BLOCK_MAGIC_NUMBER; 332 b->dev_state = NULL; 333 b->dev_bytenr = 0; 334 b->logical_bytenr = 0; 335 b->generation = BTRFSIC_GENERATION_UNKNOWN; 336 b->disk_key.objectid = 0; 337 b->disk_key.type = 0; 338 b->disk_key.offset = 0; 339 b->is_metadata = 0; 340 b->is_superblock = 0; 341 b->is_iodone = 0; 342 b->iodone_w_error = 0; 343 b->never_written = 0; 344 b->mirror_num = 0; 345 b->next_in_same_bio = NULL; 346 b->orig_bio_private = NULL; 347 b->orig_bio_end_io = NULL; 348 INIT_LIST_HEAD(&b->collision_resolving_node); 349 INIT_LIST_HEAD(&b->all_blocks_node); 350 INIT_LIST_HEAD(&b->ref_to_list); 351 INIT_LIST_HEAD(&b->ref_from_list); 352 b->submit_bio_bh_rw = 0; 353 b->flush_gen = 0; 354 } 355 356 static struct btrfsic_block *btrfsic_block_alloc(void) 357 { 358 struct btrfsic_block *b; 359 360 b = kzalloc(sizeof(*b), GFP_NOFS); 361 if (NULL != b) 362 btrfsic_block_init(b); 363 364 return b; 365 } 366 367 static void btrfsic_block_free(struct btrfsic_block *b) 368 { 369 BUG_ON(!(NULL == b || BTRFSIC_BLOCK_MAGIC_NUMBER == b->magic_num)); 370 kfree(b); 371 } 372 373 static void btrfsic_block_link_init(struct btrfsic_block_link *l) 374 { 375 l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER; 376 l->ref_cnt = 1; 377 INIT_LIST_HEAD(&l->node_ref_to); 378 INIT_LIST_HEAD(&l->node_ref_from); 379 INIT_LIST_HEAD(&l->collision_resolving_node); 380 l->block_ref_to = NULL; 381 l->block_ref_from = NULL; 382 } 383 384 static struct btrfsic_block_link *btrfsic_block_link_alloc(void) 385 { 386 struct btrfsic_block_link *l; 387 388 l = kzalloc(sizeof(*l), GFP_NOFS); 389 if (NULL != l) 390 btrfsic_block_link_init(l); 391 392 return l; 393 } 394 395 static void btrfsic_block_link_free(struct btrfsic_block_link *l) 396 { 397 BUG_ON(!(NULL == l || BTRFSIC_BLOCK_LINK_MAGIC_NUMBER == l->magic_num)); 398 kfree(l); 399 } 400 401 static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds) 402 { 403 ds->magic_num = BTRFSIC_DEV2STATE_MAGIC_NUMBER; 404 ds->bdev = NULL; 405 ds->state = NULL; 406 INIT_LIST_HEAD(&ds->collision_resolving_node); 407 ds->last_flush_gen = 0; 408 btrfsic_block_init(&ds->dummy_block_for_bio_bh_flush); 409 ds->dummy_block_for_bio_bh_flush.is_iodone = 1; 410 ds->dummy_block_for_bio_bh_flush.dev_state = ds; 411 } 412 413 static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void) 414 { 415 struct btrfsic_dev_state *ds; 416 417 ds = kzalloc(sizeof(*ds), GFP_NOFS); 418 if (NULL != ds) 419 btrfsic_dev_state_init(ds); 420 421 return ds; 422 } 423 424 static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds) 425 { 426 BUG_ON(!(NULL == ds || 427 BTRFSIC_DEV2STATE_MAGIC_NUMBER == ds->magic_num)); 428 kfree(ds); 429 } 430 431 static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h) 432 { 433 int i; 434 435 for (i = 0; i < BTRFSIC_BLOCK_HASHTABLE_SIZE; i++) 436 INIT_LIST_HEAD(h->table + i); 437 } 438 439 static void btrfsic_block_hashtable_add(struct btrfsic_block *b, 440 struct btrfsic_block_hashtable *h) 441 { 442 const unsigned int hashval = 443 (((unsigned int)(b->dev_bytenr >> 16)) ^ 444 ((unsigned int)((uintptr_t)b->dev_state->bdev))) & 445 (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1); 446 447 list_add(&b->collision_resolving_node, h->table + hashval); 448 } 449 450 static void btrfsic_block_hashtable_remove(struct btrfsic_block *b) 451 { 452 list_del(&b->collision_resolving_node); 453 } 454 455 static struct btrfsic_block *btrfsic_block_hashtable_lookup( 456 struct block_device *bdev, 457 u64 dev_bytenr, 458 struct btrfsic_block_hashtable *h) 459 { 460 const unsigned int hashval = 461 (((unsigned int)(dev_bytenr >> 16)) ^ 462 ((unsigned int)((uintptr_t)bdev))) & 463 (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1); 464 struct btrfsic_block *b; 465 466 list_for_each_entry(b, h->table + hashval, collision_resolving_node) { 467 if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr) 468 return b; 469 } 470 471 return NULL; 472 } 473 474 static void btrfsic_block_link_hashtable_init( 475 struct btrfsic_block_link_hashtable *h) 476 { 477 int i; 478 479 for (i = 0; i < BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE; i++) 480 INIT_LIST_HEAD(h->table + i); 481 } 482 483 static void btrfsic_block_link_hashtable_add( 484 struct btrfsic_block_link *l, 485 struct btrfsic_block_link_hashtable *h) 486 { 487 const unsigned int hashval = 488 (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^ 489 ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^ 490 ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^ 491 ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev))) 492 & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1); 493 494 BUG_ON(NULL == l->block_ref_to); 495 BUG_ON(NULL == l->block_ref_from); 496 list_add(&l->collision_resolving_node, h->table + hashval); 497 } 498 499 static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l) 500 { 501 list_del(&l->collision_resolving_node); 502 } 503 504 static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup( 505 struct block_device *bdev_ref_to, 506 u64 dev_bytenr_ref_to, 507 struct block_device *bdev_ref_from, 508 u64 dev_bytenr_ref_from, 509 struct btrfsic_block_link_hashtable *h) 510 { 511 const unsigned int hashval = 512 (((unsigned int)(dev_bytenr_ref_to >> 16)) ^ 513 ((unsigned int)(dev_bytenr_ref_from >> 16)) ^ 514 ((unsigned int)((uintptr_t)bdev_ref_to)) ^ 515 ((unsigned int)((uintptr_t)bdev_ref_from))) & 516 (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1); 517 struct btrfsic_block_link *l; 518 519 list_for_each_entry(l, h->table + hashval, collision_resolving_node) { 520 BUG_ON(NULL == l->block_ref_to); 521 BUG_ON(NULL == l->block_ref_from); 522 if (l->block_ref_to->dev_state->bdev == bdev_ref_to && 523 l->block_ref_to->dev_bytenr == dev_bytenr_ref_to && 524 l->block_ref_from->dev_state->bdev == bdev_ref_from && 525 l->block_ref_from->dev_bytenr == dev_bytenr_ref_from) 526 return l; 527 } 528 529 return NULL; 530 } 531 532 static void btrfsic_dev_state_hashtable_init( 533 struct btrfsic_dev_state_hashtable *h) 534 { 535 int i; 536 537 for (i = 0; i < BTRFSIC_DEV2STATE_HASHTABLE_SIZE; i++) 538 INIT_LIST_HEAD(h->table + i); 539 } 540 541 static void btrfsic_dev_state_hashtable_add( 542 struct btrfsic_dev_state *ds, 543 struct btrfsic_dev_state_hashtable *h) 544 { 545 const unsigned int hashval = 546 (((unsigned int)((uintptr_t)ds->bdev->bd_dev)) & 547 (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1)); 548 549 list_add(&ds->collision_resolving_node, h->table + hashval); 550 } 551 552 static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds) 553 { 554 list_del(&ds->collision_resolving_node); 555 } 556 557 static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev, 558 struct btrfsic_dev_state_hashtable *h) 559 { 560 const unsigned int hashval = 561 dev & (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1); 562 struct btrfsic_dev_state *ds; 563 564 list_for_each_entry(ds, h->table + hashval, collision_resolving_node) { 565 if (ds->bdev->bd_dev == dev) 566 return ds; 567 } 568 569 return NULL; 570 } 571 572 static int btrfsic_process_superblock(struct btrfsic_state *state, 573 struct btrfs_fs_devices *fs_devices) 574 { 575 struct btrfs_super_block *selected_super; 576 struct list_head *dev_head = &fs_devices->devices; 577 struct btrfs_device *device; 578 struct btrfsic_dev_state *selected_dev_state = NULL; 579 int ret = 0; 580 int pass; 581 582 selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS); 583 if (!selected_super) 584 return -ENOMEM; 585 586 list_for_each_entry(device, dev_head, dev_list) { 587 int i; 588 struct btrfsic_dev_state *dev_state; 589 590 if (!device->bdev || !device->name) 591 continue; 592 593 dev_state = btrfsic_dev_state_lookup(device->bdev->bd_dev); 594 BUG_ON(NULL == dev_state); 595 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 596 ret = btrfsic_process_superblock_dev_mirror( 597 state, dev_state, device, i, 598 &selected_dev_state, selected_super); 599 if (0 != ret && 0 == i) { 600 kfree(selected_super); 601 return ret; 602 } 603 } 604 } 605 606 if (NULL == state->latest_superblock) { 607 pr_info("btrfsic: no superblock found!\n"); 608 kfree(selected_super); 609 return -1; 610 } 611 612 for (pass = 0; pass < 3; pass++) { 613 int num_copies; 614 int mirror_num; 615 u64 next_bytenr; 616 617 switch (pass) { 618 case 0: 619 next_bytenr = btrfs_super_root(selected_super); 620 if (state->print_mask & 621 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 622 pr_info("root@%llu\n", next_bytenr); 623 break; 624 case 1: 625 next_bytenr = btrfs_super_chunk_root(selected_super); 626 if (state->print_mask & 627 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 628 pr_info("chunk@%llu\n", next_bytenr); 629 break; 630 case 2: 631 next_bytenr = btrfs_super_log_root(selected_super); 632 if (0 == next_bytenr) 633 continue; 634 if (state->print_mask & 635 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 636 pr_info("log@%llu\n", next_bytenr); 637 break; 638 } 639 640 num_copies = btrfs_num_copies(state->fs_info, next_bytenr, 641 state->metablock_size); 642 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 643 pr_info("num_copies(log_bytenr=%llu) = %d\n", 644 next_bytenr, num_copies); 645 646 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 647 struct btrfsic_block *next_block; 648 struct btrfsic_block_data_ctx tmp_next_block_ctx; 649 struct btrfsic_block_link *l; 650 651 ret = btrfsic_map_block(state, next_bytenr, 652 state->metablock_size, 653 &tmp_next_block_ctx, 654 mirror_num); 655 if (ret) { 656 pr_info("btrfsic: btrfsic_map_block(root @%llu, mirror %d) failed!\n", 657 next_bytenr, mirror_num); 658 kfree(selected_super); 659 return -1; 660 } 661 662 next_block = btrfsic_block_hashtable_lookup( 663 tmp_next_block_ctx.dev->bdev, 664 tmp_next_block_ctx.dev_bytenr, 665 &state->block_hashtable); 666 BUG_ON(NULL == next_block); 667 668 l = btrfsic_block_link_hashtable_lookup( 669 tmp_next_block_ctx.dev->bdev, 670 tmp_next_block_ctx.dev_bytenr, 671 state->latest_superblock->dev_state-> 672 bdev, 673 state->latest_superblock->dev_bytenr, 674 &state->block_link_hashtable); 675 BUG_ON(NULL == l); 676 677 ret = btrfsic_read_block(state, &tmp_next_block_ctx); 678 if (ret < (int)PAGE_SIZE) { 679 pr_info("btrfsic: read @logical %llu failed!\n", 680 tmp_next_block_ctx.start); 681 btrfsic_release_block_ctx(&tmp_next_block_ctx); 682 kfree(selected_super); 683 return -1; 684 } 685 686 ret = btrfsic_process_metablock(state, 687 next_block, 688 &tmp_next_block_ctx, 689 BTRFS_MAX_LEVEL + 3, 1); 690 btrfsic_release_block_ctx(&tmp_next_block_ctx); 691 } 692 } 693 694 kfree(selected_super); 695 return ret; 696 } 697 698 static int btrfsic_process_superblock_dev_mirror( 699 struct btrfsic_state *state, 700 struct btrfsic_dev_state *dev_state, 701 struct btrfs_device *device, 702 int superblock_mirror_num, 703 struct btrfsic_dev_state **selected_dev_state, 704 struct btrfs_super_block *selected_super) 705 { 706 struct btrfs_fs_info *fs_info = state->fs_info; 707 struct btrfs_super_block *super_tmp; 708 u64 dev_bytenr; 709 struct btrfsic_block *superblock_tmp; 710 int pass; 711 struct block_device *const superblock_bdev = device->bdev; 712 struct page *page; 713 struct address_space *mapping = superblock_bdev->bd_inode->i_mapping; 714 int ret = 0; 715 716 /* super block bytenr is always the unmapped device bytenr */ 717 dev_bytenr = btrfs_sb_offset(superblock_mirror_num); 718 if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->commit_total_bytes) 719 return -1; 720 721 page = read_cache_page_gfp(mapping, dev_bytenr >> PAGE_SHIFT, GFP_NOFS); 722 if (IS_ERR(page)) 723 return -1; 724 725 super_tmp = page_address(page); 726 727 if (btrfs_super_bytenr(super_tmp) != dev_bytenr || 728 btrfs_super_magic(super_tmp) != BTRFS_MAGIC || 729 memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) || 730 btrfs_super_nodesize(super_tmp) != state->metablock_size || 731 btrfs_super_sectorsize(super_tmp) != state->datablock_size) { 732 ret = 0; 733 goto out; 734 } 735 736 superblock_tmp = 737 btrfsic_block_hashtable_lookup(superblock_bdev, 738 dev_bytenr, 739 &state->block_hashtable); 740 if (NULL == superblock_tmp) { 741 superblock_tmp = btrfsic_block_alloc(); 742 if (NULL == superblock_tmp) { 743 ret = -1; 744 goto out; 745 } 746 /* for superblock, only the dev_bytenr makes sense */ 747 superblock_tmp->dev_bytenr = dev_bytenr; 748 superblock_tmp->dev_state = dev_state; 749 superblock_tmp->logical_bytenr = dev_bytenr; 750 superblock_tmp->generation = btrfs_super_generation(super_tmp); 751 superblock_tmp->is_metadata = 1; 752 superblock_tmp->is_superblock = 1; 753 superblock_tmp->is_iodone = 1; 754 superblock_tmp->never_written = 0; 755 superblock_tmp->mirror_num = 1 + superblock_mirror_num; 756 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 757 btrfs_info_in_rcu(fs_info, 758 "new initial S-block (bdev %p, %s) @%llu (%pg/%llu/%d)", 759 superblock_bdev, 760 btrfs_dev_name(device), dev_bytenr, 761 dev_state->bdev, dev_bytenr, 762 superblock_mirror_num); 763 list_add(&superblock_tmp->all_blocks_node, 764 &state->all_blocks_list); 765 btrfsic_block_hashtable_add(superblock_tmp, 766 &state->block_hashtable); 767 } 768 769 /* select the one with the highest generation field */ 770 if (btrfs_super_generation(super_tmp) > 771 state->max_superblock_generation || 772 0 == state->max_superblock_generation) { 773 memcpy(selected_super, super_tmp, sizeof(*selected_super)); 774 *selected_dev_state = dev_state; 775 state->max_superblock_generation = 776 btrfs_super_generation(super_tmp); 777 state->latest_superblock = superblock_tmp; 778 } 779 780 for (pass = 0; pass < 3; pass++) { 781 u64 next_bytenr; 782 int num_copies; 783 int mirror_num; 784 const char *additional_string = NULL; 785 struct btrfs_disk_key tmp_disk_key; 786 787 tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY; 788 tmp_disk_key.offset = 0; 789 switch (pass) { 790 case 0: 791 btrfs_set_disk_key_objectid(&tmp_disk_key, 792 BTRFS_ROOT_TREE_OBJECTID); 793 additional_string = "initial root "; 794 next_bytenr = btrfs_super_root(super_tmp); 795 break; 796 case 1: 797 btrfs_set_disk_key_objectid(&tmp_disk_key, 798 BTRFS_CHUNK_TREE_OBJECTID); 799 additional_string = "initial chunk "; 800 next_bytenr = btrfs_super_chunk_root(super_tmp); 801 break; 802 case 2: 803 btrfs_set_disk_key_objectid(&tmp_disk_key, 804 BTRFS_TREE_LOG_OBJECTID); 805 additional_string = "initial log "; 806 next_bytenr = btrfs_super_log_root(super_tmp); 807 if (0 == next_bytenr) 808 continue; 809 break; 810 } 811 812 num_copies = btrfs_num_copies(fs_info, next_bytenr, 813 state->metablock_size); 814 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 815 pr_info("num_copies(log_bytenr=%llu) = %d\n", 816 next_bytenr, num_copies); 817 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 818 struct btrfsic_block *next_block; 819 struct btrfsic_block_data_ctx tmp_next_block_ctx; 820 struct btrfsic_block_link *l; 821 822 if (btrfsic_map_block(state, next_bytenr, 823 state->metablock_size, 824 &tmp_next_block_ctx, 825 mirror_num)) { 826 pr_info("btrfsic: btrfsic_map_block(bytenr @%llu, mirror %d) failed!\n", 827 next_bytenr, mirror_num); 828 ret = -1; 829 goto out; 830 } 831 832 next_block = btrfsic_block_lookup_or_add( 833 state, &tmp_next_block_ctx, 834 additional_string, 1, 1, 0, 835 mirror_num, NULL); 836 if (NULL == next_block) { 837 btrfsic_release_block_ctx(&tmp_next_block_ctx); 838 ret = -1; 839 goto out; 840 } 841 842 next_block->disk_key = tmp_disk_key; 843 next_block->generation = BTRFSIC_GENERATION_UNKNOWN; 844 l = btrfsic_block_link_lookup_or_add( 845 state, &tmp_next_block_ctx, 846 next_block, superblock_tmp, 847 BTRFSIC_GENERATION_UNKNOWN); 848 btrfsic_release_block_ctx(&tmp_next_block_ctx); 849 if (NULL == l) { 850 ret = -1; 851 goto out; 852 } 853 } 854 } 855 if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES) 856 btrfsic_dump_tree_sub(state, superblock_tmp, 0); 857 858 out: 859 put_page(page); 860 return ret; 861 } 862 863 static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void) 864 { 865 struct btrfsic_stack_frame *sf; 866 867 sf = kzalloc(sizeof(*sf), GFP_NOFS); 868 if (sf) 869 sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER; 870 return sf; 871 } 872 873 static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf) 874 { 875 BUG_ON(!(NULL == sf || 876 BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER == sf->magic)); 877 kfree(sf); 878 } 879 880 static noinline_for_stack int btrfsic_process_metablock( 881 struct btrfsic_state *state, 882 struct btrfsic_block *const first_block, 883 struct btrfsic_block_data_ctx *const first_block_ctx, 884 int first_limit_nesting, int force_iodone_flag) 885 { 886 struct btrfsic_stack_frame initial_stack_frame = { 0 }; 887 struct btrfsic_stack_frame *sf; 888 struct btrfsic_stack_frame *next_stack; 889 struct btrfs_header *const first_hdr = 890 (struct btrfs_header *)first_block_ctx->datav[0]; 891 892 BUG_ON(!first_hdr); 893 sf = &initial_stack_frame; 894 sf->error = 0; 895 sf->i = -1; 896 sf->limit_nesting = first_limit_nesting; 897 sf->block = first_block; 898 sf->block_ctx = first_block_ctx; 899 sf->next_block = NULL; 900 sf->hdr = first_hdr; 901 sf->prev = NULL; 902 903 continue_with_new_stack_frame: 904 sf->block->generation = btrfs_stack_header_generation(sf->hdr); 905 if (0 == sf->hdr->level) { 906 struct btrfs_leaf *const leafhdr = 907 (struct btrfs_leaf *)sf->hdr; 908 909 if (-1 == sf->i) { 910 sf->nr = btrfs_stack_header_nritems(&leafhdr->header); 911 912 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 913 pr_info("leaf %llu items %d generation %llu owner %llu\n", 914 sf->block_ctx->start, sf->nr, 915 btrfs_stack_header_generation( 916 &leafhdr->header), 917 btrfs_stack_header_owner( 918 &leafhdr->header)); 919 } 920 921 continue_with_current_leaf_stack_frame: 922 if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) { 923 sf->i++; 924 sf->num_copies = 0; 925 } 926 927 if (sf->i < sf->nr) { 928 struct btrfs_item disk_item; 929 u32 disk_item_offset = 930 (uintptr_t)(leafhdr->items + sf->i) - 931 (uintptr_t)leafhdr; 932 struct btrfs_disk_key *disk_key; 933 u8 type; 934 u32 item_offset; 935 u32 item_size; 936 937 if (disk_item_offset + sizeof(struct btrfs_item) > 938 sf->block_ctx->len) { 939 leaf_item_out_of_bounce_error: 940 pr_info( 941 "btrfsic: leaf item out of bounce at logical %llu, dev %pg\n", 942 sf->block_ctx->start, 943 sf->block_ctx->dev->bdev); 944 goto one_stack_frame_backwards; 945 } 946 btrfsic_read_from_block_data(sf->block_ctx, 947 &disk_item, 948 disk_item_offset, 949 sizeof(struct btrfs_item)); 950 item_offset = btrfs_stack_item_offset(&disk_item); 951 item_size = btrfs_stack_item_size(&disk_item); 952 disk_key = &disk_item.key; 953 type = btrfs_disk_key_type(disk_key); 954 955 if (BTRFS_ROOT_ITEM_KEY == type) { 956 struct btrfs_root_item root_item; 957 u32 root_item_offset; 958 u64 next_bytenr; 959 960 root_item_offset = item_offset + 961 offsetof(struct btrfs_leaf, items); 962 if (root_item_offset + item_size > 963 sf->block_ctx->len) 964 goto leaf_item_out_of_bounce_error; 965 btrfsic_read_from_block_data( 966 sf->block_ctx, &root_item, 967 root_item_offset, 968 item_size); 969 next_bytenr = btrfs_root_bytenr(&root_item); 970 971 sf->error = 972 btrfsic_create_link_to_next_block( 973 state, 974 sf->block, 975 sf->block_ctx, 976 next_bytenr, 977 sf->limit_nesting, 978 &sf->next_block_ctx, 979 &sf->next_block, 980 force_iodone_flag, 981 &sf->num_copies, 982 &sf->mirror_num, 983 disk_key, 984 btrfs_root_generation( 985 &root_item)); 986 if (sf->error) 987 goto one_stack_frame_backwards; 988 989 if (NULL != sf->next_block) { 990 struct btrfs_header *const next_hdr = 991 (struct btrfs_header *) 992 sf->next_block_ctx.datav[0]; 993 994 next_stack = 995 btrfsic_stack_frame_alloc(); 996 if (NULL == next_stack) { 997 sf->error = -1; 998 btrfsic_release_block_ctx( 999 &sf-> 1000 next_block_ctx); 1001 goto one_stack_frame_backwards; 1002 } 1003 1004 next_stack->i = -1; 1005 next_stack->block = sf->next_block; 1006 next_stack->block_ctx = 1007 &sf->next_block_ctx; 1008 next_stack->next_block = NULL; 1009 next_stack->hdr = next_hdr; 1010 next_stack->limit_nesting = 1011 sf->limit_nesting - 1; 1012 next_stack->prev = sf; 1013 sf = next_stack; 1014 goto continue_with_new_stack_frame; 1015 } 1016 } else if (BTRFS_EXTENT_DATA_KEY == type && 1017 state->include_extent_data) { 1018 sf->error = btrfsic_handle_extent_data( 1019 state, 1020 sf->block, 1021 sf->block_ctx, 1022 item_offset, 1023 force_iodone_flag); 1024 if (sf->error) 1025 goto one_stack_frame_backwards; 1026 } 1027 1028 goto continue_with_current_leaf_stack_frame; 1029 } 1030 } else { 1031 struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr; 1032 1033 if (-1 == sf->i) { 1034 sf->nr = btrfs_stack_header_nritems(&nodehdr->header); 1035 1036 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1037 pr_info("node %llu level %d items %d generation %llu owner %llu\n", 1038 sf->block_ctx->start, 1039 nodehdr->header.level, sf->nr, 1040 btrfs_stack_header_generation( 1041 &nodehdr->header), 1042 btrfs_stack_header_owner( 1043 &nodehdr->header)); 1044 } 1045 1046 continue_with_current_node_stack_frame: 1047 if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) { 1048 sf->i++; 1049 sf->num_copies = 0; 1050 } 1051 1052 if (sf->i < sf->nr) { 1053 struct btrfs_key_ptr key_ptr; 1054 u32 key_ptr_offset; 1055 u64 next_bytenr; 1056 1057 key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) - 1058 (uintptr_t)nodehdr; 1059 if (key_ptr_offset + sizeof(struct btrfs_key_ptr) > 1060 sf->block_ctx->len) { 1061 pr_info( 1062 "btrfsic: node item out of bounce at logical %llu, dev %pg\n", 1063 sf->block_ctx->start, 1064 sf->block_ctx->dev->bdev); 1065 goto one_stack_frame_backwards; 1066 } 1067 btrfsic_read_from_block_data( 1068 sf->block_ctx, &key_ptr, key_ptr_offset, 1069 sizeof(struct btrfs_key_ptr)); 1070 next_bytenr = btrfs_stack_key_blockptr(&key_ptr); 1071 1072 sf->error = btrfsic_create_link_to_next_block( 1073 state, 1074 sf->block, 1075 sf->block_ctx, 1076 next_bytenr, 1077 sf->limit_nesting, 1078 &sf->next_block_ctx, 1079 &sf->next_block, 1080 force_iodone_flag, 1081 &sf->num_copies, 1082 &sf->mirror_num, 1083 &key_ptr.key, 1084 btrfs_stack_key_generation(&key_ptr)); 1085 if (sf->error) 1086 goto one_stack_frame_backwards; 1087 1088 if (NULL != sf->next_block) { 1089 struct btrfs_header *const next_hdr = 1090 (struct btrfs_header *) 1091 sf->next_block_ctx.datav[0]; 1092 1093 next_stack = btrfsic_stack_frame_alloc(); 1094 if (NULL == next_stack) { 1095 sf->error = -1; 1096 goto one_stack_frame_backwards; 1097 } 1098 1099 next_stack->i = -1; 1100 next_stack->block = sf->next_block; 1101 next_stack->block_ctx = &sf->next_block_ctx; 1102 next_stack->next_block = NULL; 1103 next_stack->hdr = next_hdr; 1104 next_stack->limit_nesting = 1105 sf->limit_nesting - 1; 1106 next_stack->prev = sf; 1107 sf = next_stack; 1108 goto continue_with_new_stack_frame; 1109 } 1110 1111 goto continue_with_current_node_stack_frame; 1112 } 1113 } 1114 1115 one_stack_frame_backwards: 1116 if (NULL != sf->prev) { 1117 struct btrfsic_stack_frame *const prev = sf->prev; 1118 1119 /* the one for the initial block is freed in the caller */ 1120 btrfsic_release_block_ctx(sf->block_ctx); 1121 1122 if (sf->error) { 1123 prev->error = sf->error; 1124 btrfsic_stack_frame_free(sf); 1125 sf = prev; 1126 goto one_stack_frame_backwards; 1127 } 1128 1129 btrfsic_stack_frame_free(sf); 1130 sf = prev; 1131 goto continue_with_new_stack_frame; 1132 } else { 1133 BUG_ON(&initial_stack_frame != sf); 1134 } 1135 1136 return sf->error; 1137 } 1138 1139 static void btrfsic_read_from_block_data( 1140 struct btrfsic_block_data_ctx *block_ctx, 1141 void *dstv, u32 offset, size_t len) 1142 { 1143 size_t cur; 1144 size_t pgoff; 1145 char *kaddr; 1146 char *dst = (char *)dstv; 1147 size_t start_offset = offset_in_page(block_ctx->start); 1148 unsigned long i = (start_offset + offset) >> PAGE_SHIFT; 1149 1150 WARN_ON(offset + len > block_ctx->len); 1151 pgoff = offset_in_page(start_offset + offset); 1152 1153 while (len > 0) { 1154 cur = min(len, ((size_t)PAGE_SIZE - pgoff)); 1155 BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE)); 1156 kaddr = block_ctx->datav[i]; 1157 memcpy(dst, kaddr + pgoff, cur); 1158 1159 dst += cur; 1160 len -= cur; 1161 pgoff = 0; 1162 i++; 1163 } 1164 } 1165 1166 static int btrfsic_create_link_to_next_block( 1167 struct btrfsic_state *state, 1168 struct btrfsic_block *block, 1169 struct btrfsic_block_data_ctx *block_ctx, 1170 u64 next_bytenr, 1171 int limit_nesting, 1172 struct btrfsic_block_data_ctx *next_block_ctx, 1173 struct btrfsic_block **next_blockp, 1174 int force_iodone_flag, 1175 int *num_copiesp, int *mirror_nump, 1176 struct btrfs_disk_key *disk_key, 1177 u64 parent_generation) 1178 { 1179 struct btrfs_fs_info *fs_info = state->fs_info; 1180 struct btrfsic_block *next_block = NULL; 1181 int ret; 1182 struct btrfsic_block_link *l; 1183 int did_alloc_block_link; 1184 int block_was_created; 1185 1186 *next_blockp = NULL; 1187 if (0 == *num_copiesp) { 1188 *num_copiesp = btrfs_num_copies(fs_info, next_bytenr, 1189 state->metablock_size); 1190 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 1191 pr_info("num_copies(log_bytenr=%llu) = %d\n", 1192 next_bytenr, *num_copiesp); 1193 *mirror_nump = 1; 1194 } 1195 1196 if (*mirror_nump > *num_copiesp) 1197 return 0; 1198 1199 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1200 pr_info("btrfsic_create_link_to_next_block(mirror_num=%d)\n", 1201 *mirror_nump); 1202 ret = btrfsic_map_block(state, next_bytenr, 1203 state->metablock_size, 1204 next_block_ctx, *mirror_nump); 1205 if (ret) { 1206 pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n", 1207 next_bytenr, *mirror_nump); 1208 btrfsic_release_block_ctx(next_block_ctx); 1209 *next_blockp = NULL; 1210 return -1; 1211 } 1212 1213 next_block = btrfsic_block_lookup_or_add(state, 1214 next_block_ctx, "referenced ", 1215 1, force_iodone_flag, 1216 !force_iodone_flag, 1217 *mirror_nump, 1218 &block_was_created); 1219 if (NULL == next_block) { 1220 btrfsic_release_block_ctx(next_block_ctx); 1221 *next_blockp = NULL; 1222 return -1; 1223 } 1224 if (block_was_created) { 1225 l = NULL; 1226 next_block->generation = BTRFSIC_GENERATION_UNKNOWN; 1227 } else { 1228 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) { 1229 if (next_block->logical_bytenr != next_bytenr && 1230 !(!next_block->is_metadata && 1231 0 == next_block->logical_bytenr)) 1232 pr_info( 1233 "referenced block @%llu (%pg/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu)\n", 1234 next_bytenr, next_block_ctx->dev->bdev, 1235 next_block_ctx->dev_bytenr, *mirror_nump, 1236 btrfsic_get_block_type(state, 1237 next_block), 1238 next_block->logical_bytenr); 1239 else 1240 pr_info( 1241 "referenced block @%llu (%pg/%llu/%d) found in hash table, %c\n", 1242 next_bytenr, next_block_ctx->dev->bdev, 1243 next_block_ctx->dev_bytenr, *mirror_nump, 1244 btrfsic_get_block_type(state, 1245 next_block)); 1246 } 1247 next_block->logical_bytenr = next_bytenr; 1248 1249 next_block->mirror_num = *mirror_nump; 1250 l = btrfsic_block_link_hashtable_lookup( 1251 next_block_ctx->dev->bdev, 1252 next_block_ctx->dev_bytenr, 1253 block_ctx->dev->bdev, 1254 block_ctx->dev_bytenr, 1255 &state->block_link_hashtable); 1256 } 1257 1258 next_block->disk_key = *disk_key; 1259 if (NULL == l) { 1260 l = btrfsic_block_link_alloc(); 1261 if (NULL == l) { 1262 btrfsic_release_block_ctx(next_block_ctx); 1263 *next_blockp = NULL; 1264 return -1; 1265 } 1266 1267 did_alloc_block_link = 1; 1268 l->block_ref_to = next_block; 1269 l->block_ref_from = block; 1270 l->ref_cnt = 1; 1271 l->parent_generation = parent_generation; 1272 1273 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1274 btrfsic_print_add_link(state, l); 1275 1276 list_add(&l->node_ref_to, &block->ref_to_list); 1277 list_add(&l->node_ref_from, &next_block->ref_from_list); 1278 1279 btrfsic_block_link_hashtable_add(l, 1280 &state->block_link_hashtable); 1281 } else { 1282 did_alloc_block_link = 0; 1283 if (0 == limit_nesting) { 1284 l->ref_cnt++; 1285 l->parent_generation = parent_generation; 1286 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1287 btrfsic_print_add_link(state, l); 1288 } 1289 } 1290 1291 if (limit_nesting > 0 && did_alloc_block_link) { 1292 ret = btrfsic_read_block(state, next_block_ctx); 1293 if (ret < (int)next_block_ctx->len) { 1294 pr_info("btrfsic: read block @logical %llu failed!\n", 1295 next_bytenr); 1296 btrfsic_release_block_ctx(next_block_ctx); 1297 *next_blockp = NULL; 1298 return -1; 1299 } 1300 1301 *next_blockp = next_block; 1302 } else { 1303 *next_blockp = NULL; 1304 } 1305 (*mirror_nump)++; 1306 1307 return 0; 1308 } 1309 1310 static int btrfsic_handle_extent_data( 1311 struct btrfsic_state *state, 1312 struct btrfsic_block *block, 1313 struct btrfsic_block_data_ctx *block_ctx, 1314 u32 item_offset, int force_iodone_flag) 1315 { 1316 struct btrfs_fs_info *fs_info = state->fs_info; 1317 struct btrfs_file_extent_item file_extent_item; 1318 u64 file_extent_item_offset; 1319 u64 next_bytenr; 1320 u64 num_bytes; 1321 u64 generation; 1322 struct btrfsic_block_link *l; 1323 int ret; 1324 1325 file_extent_item_offset = offsetof(struct btrfs_leaf, items) + 1326 item_offset; 1327 if (file_extent_item_offset + 1328 offsetof(struct btrfs_file_extent_item, disk_num_bytes) > 1329 block_ctx->len) { 1330 pr_info("btrfsic: file item out of bounce at logical %llu, dev %pg\n", 1331 block_ctx->start, block_ctx->dev->bdev); 1332 return -1; 1333 } 1334 1335 btrfsic_read_from_block_data(block_ctx, &file_extent_item, 1336 file_extent_item_offset, 1337 offsetof(struct btrfs_file_extent_item, disk_num_bytes)); 1338 if (BTRFS_FILE_EXTENT_REG != file_extent_item.type || 1339 btrfs_stack_file_extent_disk_bytenr(&file_extent_item) == 0) { 1340 if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE) 1341 pr_info("extent_data: type %u, disk_bytenr = %llu\n", 1342 file_extent_item.type, 1343 btrfs_stack_file_extent_disk_bytenr( 1344 &file_extent_item)); 1345 return 0; 1346 } 1347 1348 if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) > 1349 block_ctx->len) { 1350 pr_info("btrfsic: file item out of bounce at logical %llu, dev %pg\n", 1351 block_ctx->start, block_ctx->dev->bdev); 1352 return -1; 1353 } 1354 btrfsic_read_from_block_data(block_ctx, &file_extent_item, 1355 file_extent_item_offset, 1356 sizeof(struct btrfs_file_extent_item)); 1357 next_bytenr = btrfs_stack_file_extent_disk_bytenr(&file_extent_item); 1358 if (btrfs_stack_file_extent_compression(&file_extent_item) == 1359 BTRFS_COMPRESS_NONE) { 1360 next_bytenr += btrfs_stack_file_extent_offset(&file_extent_item); 1361 num_bytes = btrfs_stack_file_extent_num_bytes(&file_extent_item); 1362 } else { 1363 num_bytes = btrfs_stack_file_extent_disk_num_bytes(&file_extent_item); 1364 } 1365 generation = btrfs_stack_file_extent_generation(&file_extent_item); 1366 1367 if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE) 1368 pr_info("extent_data: type %u, disk_bytenr = %llu, offset = %llu, num_bytes = %llu\n", 1369 file_extent_item.type, 1370 btrfs_stack_file_extent_disk_bytenr(&file_extent_item), 1371 btrfs_stack_file_extent_offset(&file_extent_item), 1372 num_bytes); 1373 while (num_bytes > 0) { 1374 u32 chunk_len; 1375 int num_copies; 1376 int mirror_num; 1377 1378 if (num_bytes > state->datablock_size) 1379 chunk_len = state->datablock_size; 1380 else 1381 chunk_len = num_bytes; 1382 1383 num_copies = btrfs_num_copies(fs_info, next_bytenr, 1384 state->datablock_size); 1385 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 1386 pr_info("num_copies(log_bytenr=%llu) = %d\n", 1387 next_bytenr, num_copies); 1388 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 1389 struct btrfsic_block_data_ctx next_block_ctx; 1390 struct btrfsic_block *next_block; 1391 int block_was_created; 1392 1393 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1394 pr_info("btrfsic_handle_extent_data(mirror_num=%d)\n", 1395 mirror_num); 1396 if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE) 1397 pr_info("\tdisk_bytenr = %llu, num_bytes %u\n", 1398 next_bytenr, chunk_len); 1399 ret = btrfsic_map_block(state, next_bytenr, 1400 chunk_len, &next_block_ctx, 1401 mirror_num); 1402 if (ret) { 1403 pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n", 1404 next_bytenr, mirror_num); 1405 return -1; 1406 } 1407 1408 next_block = btrfsic_block_lookup_or_add( 1409 state, 1410 &next_block_ctx, 1411 "referenced ", 1412 0, 1413 force_iodone_flag, 1414 !force_iodone_flag, 1415 mirror_num, 1416 &block_was_created); 1417 if (NULL == next_block) { 1418 btrfsic_release_block_ctx(&next_block_ctx); 1419 return -1; 1420 } 1421 if (!block_was_created) { 1422 if ((state->print_mask & 1423 BTRFSIC_PRINT_MASK_VERBOSE) && 1424 next_block->logical_bytenr != next_bytenr && 1425 !(!next_block->is_metadata && 1426 0 == next_block->logical_bytenr)) { 1427 pr_info( 1428 "referenced block @%llu (%pg/%llu/%d) found in hash table, D, bytenr mismatch (!= stored %llu)\n", 1429 next_bytenr, 1430 next_block_ctx.dev->bdev, 1431 next_block_ctx.dev_bytenr, 1432 mirror_num, 1433 next_block->logical_bytenr); 1434 } 1435 next_block->logical_bytenr = next_bytenr; 1436 next_block->mirror_num = mirror_num; 1437 } 1438 1439 l = btrfsic_block_link_lookup_or_add(state, 1440 &next_block_ctx, 1441 next_block, block, 1442 generation); 1443 btrfsic_release_block_ctx(&next_block_ctx); 1444 if (NULL == l) 1445 return -1; 1446 } 1447 1448 next_bytenr += chunk_len; 1449 num_bytes -= chunk_len; 1450 } 1451 1452 return 0; 1453 } 1454 1455 static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len, 1456 struct btrfsic_block_data_ctx *block_ctx_out, 1457 int mirror_num) 1458 { 1459 struct btrfs_fs_info *fs_info = state->fs_info; 1460 int ret; 1461 u64 length; 1462 struct btrfs_io_context *bioc = NULL; 1463 struct btrfs_io_stripe smap, *map; 1464 struct btrfs_device *device; 1465 1466 length = len; 1467 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, bytenr, &length, &bioc, 1468 NULL, &mirror_num, 0); 1469 if (ret) { 1470 block_ctx_out->start = 0; 1471 block_ctx_out->dev_bytenr = 0; 1472 block_ctx_out->len = 0; 1473 block_ctx_out->dev = NULL; 1474 block_ctx_out->datav = NULL; 1475 block_ctx_out->pagev = NULL; 1476 block_ctx_out->mem_to_free = NULL; 1477 1478 return ret; 1479 } 1480 1481 if (bioc) 1482 map = &bioc->stripes[0]; 1483 else 1484 map = &smap; 1485 1486 device = map->dev; 1487 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) || 1488 !device->bdev || !device->name) 1489 block_ctx_out->dev = NULL; 1490 else 1491 block_ctx_out->dev = btrfsic_dev_state_lookup( 1492 device->bdev->bd_dev); 1493 block_ctx_out->dev_bytenr = map->physical; 1494 block_ctx_out->start = bytenr; 1495 block_ctx_out->len = len; 1496 block_ctx_out->datav = NULL; 1497 block_ctx_out->pagev = NULL; 1498 block_ctx_out->mem_to_free = NULL; 1499 1500 kfree(bioc); 1501 if (NULL == block_ctx_out->dev) { 1502 ret = -ENXIO; 1503 pr_info("btrfsic: error, cannot lookup dev (#1)!\n"); 1504 } 1505 1506 return ret; 1507 } 1508 1509 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx) 1510 { 1511 if (block_ctx->mem_to_free) { 1512 unsigned int num_pages; 1513 1514 BUG_ON(!block_ctx->datav); 1515 BUG_ON(!block_ctx->pagev); 1516 num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >> 1517 PAGE_SHIFT; 1518 /* Pages must be unmapped in reverse order */ 1519 while (num_pages > 0) { 1520 num_pages--; 1521 if (block_ctx->datav[num_pages]) 1522 block_ctx->datav[num_pages] = NULL; 1523 if (block_ctx->pagev[num_pages]) { 1524 __free_page(block_ctx->pagev[num_pages]); 1525 block_ctx->pagev[num_pages] = NULL; 1526 } 1527 } 1528 1529 kfree(block_ctx->mem_to_free); 1530 block_ctx->mem_to_free = NULL; 1531 block_ctx->pagev = NULL; 1532 block_ctx->datav = NULL; 1533 } 1534 } 1535 1536 static int btrfsic_read_block(struct btrfsic_state *state, 1537 struct btrfsic_block_data_ctx *block_ctx) 1538 { 1539 unsigned int num_pages; 1540 unsigned int i; 1541 size_t size; 1542 u64 dev_bytenr; 1543 int ret; 1544 1545 BUG_ON(block_ctx->datav); 1546 BUG_ON(block_ctx->pagev); 1547 BUG_ON(block_ctx->mem_to_free); 1548 if (!PAGE_ALIGNED(block_ctx->dev_bytenr)) { 1549 pr_info("btrfsic: read_block() with unaligned bytenr %llu\n", 1550 block_ctx->dev_bytenr); 1551 return -1; 1552 } 1553 1554 num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >> 1555 PAGE_SHIFT; 1556 size = sizeof(*block_ctx->datav) + sizeof(*block_ctx->pagev); 1557 block_ctx->mem_to_free = kcalloc(num_pages, size, GFP_NOFS); 1558 if (!block_ctx->mem_to_free) 1559 return -ENOMEM; 1560 block_ctx->datav = block_ctx->mem_to_free; 1561 block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages); 1562 ret = btrfs_alloc_page_array(num_pages, block_ctx->pagev); 1563 if (ret) 1564 return ret; 1565 1566 dev_bytenr = block_ctx->dev_bytenr; 1567 for (i = 0; i < num_pages;) { 1568 struct bio *bio; 1569 unsigned int j; 1570 1571 bio = bio_alloc(block_ctx->dev->bdev, num_pages - i, 1572 REQ_OP_READ, GFP_NOFS); 1573 bio->bi_iter.bi_sector = dev_bytenr >> SECTOR_SHIFT; 1574 1575 for (j = i; j < num_pages; j++) { 1576 ret = bio_add_page(bio, block_ctx->pagev[j], 1577 PAGE_SIZE, 0); 1578 if (PAGE_SIZE != ret) 1579 break; 1580 } 1581 if (j == i) { 1582 pr_info("btrfsic: error, failed to add a single page!\n"); 1583 return -1; 1584 } 1585 if (submit_bio_wait(bio)) { 1586 pr_info("btrfsic: read error at logical %llu dev %pg!\n", 1587 block_ctx->start, block_ctx->dev->bdev); 1588 bio_put(bio); 1589 return -1; 1590 } 1591 bio_put(bio); 1592 dev_bytenr += (j - i) * PAGE_SIZE; 1593 i = j; 1594 } 1595 for (i = 0; i < num_pages; i++) 1596 block_ctx->datav[i] = page_address(block_ctx->pagev[i]); 1597 1598 return block_ctx->len; 1599 } 1600 1601 static void btrfsic_dump_database(struct btrfsic_state *state) 1602 { 1603 const struct btrfsic_block *b_all; 1604 1605 BUG_ON(NULL == state); 1606 1607 pr_info("all_blocks_list:\n"); 1608 list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) { 1609 const struct btrfsic_block_link *l; 1610 1611 pr_info("%c-block @%llu (%pg/%llu/%d)\n", 1612 btrfsic_get_block_type(state, b_all), 1613 b_all->logical_bytenr, b_all->dev_state->bdev, 1614 b_all->dev_bytenr, b_all->mirror_num); 1615 1616 list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) { 1617 pr_info( 1618 " %c @%llu (%pg/%llu/%d) refers %u* to %c @%llu (%pg/%llu/%d)\n", 1619 btrfsic_get_block_type(state, b_all), 1620 b_all->logical_bytenr, b_all->dev_state->bdev, 1621 b_all->dev_bytenr, b_all->mirror_num, 1622 l->ref_cnt, 1623 btrfsic_get_block_type(state, l->block_ref_to), 1624 l->block_ref_to->logical_bytenr, 1625 l->block_ref_to->dev_state->bdev, 1626 l->block_ref_to->dev_bytenr, 1627 l->block_ref_to->mirror_num); 1628 } 1629 1630 list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) { 1631 pr_info( 1632 " %c @%llu (%pg/%llu/%d) is ref %u* from %c @%llu (%pg/%llu/%d)\n", 1633 btrfsic_get_block_type(state, b_all), 1634 b_all->logical_bytenr, b_all->dev_state->bdev, 1635 b_all->dev_bytenr, b_all->mirror_num, 1636 l->ref_cnt, 1637 btrfsic_get_block_type(state, l->block_ref_from), 1638 l->block_ref_from->logical_bytenr, 1639 l->block_ref_from->dev_state->bdev, 1640 l->block_ref_from->dev_bytenr, 1641 l->block_ref_from->mirror_num); 1642 } 1643 1644 pr_info("\n"); 1645 } 1646 } 1647 1648 /* 1649 * Test whether the disk block contains a tree block (leaf or node) 1650 * (note that this test fails for the super block) 1651 */ 1652 static noinline_for_stack int btrfsic_test_for_metadata( 1653 struct btrfsic_state *state, 1654 char **datav, unsigned int num_pages) 1655 { 1656 struct btrfs_fs_info *fs_info = state->fs_info; 1657 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 1658 struct btrfs_header *h; 1659 u8 csum[BTRFS_CSUM_SIZE]; 1660 unsigned int i; 1661 1662 if (num_pages * PAGE_SIZE < state->metablock_size) 1663 return 1; /* not metadata */ 1664 num_pages = state->metablock_size >> PAGE_SHIFT; 1665 h = (struct btrfs_header *)datav[0]; 1666 1667 if (memcmp(h->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE)) 1668 return 1; 1669 1670 shash->tfm = fs_info->csum_shash; 1671 crypto_shash_init(shash); 1672 1673 for (i = 0; i < num_pages; i++) { 1674 u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE); 1675 size_t sublen = i ? PAGE_SIZE : 1676 (PAGE_SIZE - BTRFS_CSUM_SIZE); 1677 1678 crypto_shash_update(shash, data, sublen); 1679 } 1680 crypto_shash_final(shash, csum); 1681 if (memcmp(csum, h->csum, fs_info->csum_size)) 1682 return 1; 1683 1684 return 0; /* is metadata */ 1685 } 1686 1687 static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, 1688 u64 dev_bytenr, char **mapped_datav, 1689 unsigned int num_pages, 1690 struct bio *bio, int *bio_is_patched, 1691 blk_opf_t submit_bio_bh_rw) 1692 { 1693 int is_metadata; 1694 struct btrfsic_block *block; 1695 struct btrfsic_block_data_ctx block_ctx; 1696 int ret; 1697 struct btrfsic_state *state = dev_state->state; 1698 struct block_device *bdev = dev_state->bdev; 1699 unsigned int processed_len; 1700 1701 if (NULL != bio_is_patched) 1702 *bio_is_patched = 0; 1703 1704 again: 1705 if (num_pages == 0) 1706 return; 1707 1708 processed_len = 0; 1709 is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav, 1710 num_pages)); 1711 1712 block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr, 1713 &state->block_hashtable); 1714 if (NULL != block) { 1715 u64 bytenr = 0; 1716 struct btrfsic_block_link *l, *tmp; 1717 1718 if (block->is_superblock) { 1719 bytenr = btrfs_super_bytenr((struct btrfs_super_block *) 1720 mapped_datav[0]); 1721 if (num_pages * PAGE_SIZE < 1722 BTRFS_SUPER_INFO_SIZE) { 1723 pr_info("btrfsic: cannot work with too short bios!\n"); 1724 return; 1725 } 1726 is_metadata = 1; 1727 BUG_ON(!PAGE_ALIGNED(BTRFS_SUPER_INFO_SIZE)); 1728 processed_len = BTRFS_SUPER_INFO_SIZE; 1729 if (state->print_mask & 1730 BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) { 1731 pr_info("[before new superblock is written]:\n"); 1732 btrfsic_dump_tree_sub(state, block, 0); 1733 } 1734 } 1735 if (is_metadata) { 1736 if (!block->is_superblock) { 1737 if (num_pages * PAGE_SIZE < 1738 state->metablock_size) { 1739 pr_info("btrfsic: cannot work with too short bios!\n"); 1740 return; 1741 } 1742 processed_len = state->metablock_size; 1743 bytenr = btrfs_stack_header_bytenr( 1744 (struct btrfs_header *) 1745 mapped_datav[0]); 1746 btrfsic_cmp_log_and_dev_bytenr(state, bytenr, 1747 dev_state, 1748 dev_bytenr); 1749 } 1750 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) { 1751 if (block->logical_bytenr != bytenr && 1752 !(!block->is_metadata && 1753 block->logical_bytenr == 0)) 1754 pr_info( 1755 "written block @%llu (%pg/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu)\n", 1756 bytenr, dev_state->bdev, 1757 dev_bytenr, 1758 block->mirror_num, 1759 btrfsic_get_block_type(state, 1760 block), 1761 block->logical_bytenr); 1762 else 1763 pr_info( 1764 "written block @%llu (%pg/%llu/%d) found in hash table, %c\n", 1765 bytenr, dev_state->bdev, 1766 dev_bytenr, block->mirror_num, 1767 btrfsic_get_block_type(state, 1768 block)); 1769 } 1770 block->logical_bytenr = bytenr; 1771 } else { 1772 if (num_pages * PAGE_SIZE < 1773 state->datablock_size) { 1774 pr_info("btrfsic: cannot work with too short bios!\n"); 1775 return; 1776 } 1777 processed_len = state->datablock_size; 1778 bytenr = block->logical_bytenr; 1779 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1780 pr_info( 1781 "written block @%llu (%pg/%llu/%d) found in hash table, %c\n", 1782 bytenr, dev_state->bdev, dev_bytenr, 1783 block->mirror_num, 1784 btrfsic_get_block_type(state, block)); 1785 } 1786 1787 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1788 pr_info("ref_to_list: %cE, ref_from_list: %cE\n", 1789 list_empty(&block->ref_to_list) ? ' ' : '!', 1790 list_empty(&block->ref_from_list) ? ' ' : '!'); 1791 if (btrfsic_is_block_ref_by_superblock(state, block, 0)) { 1792 pr_info( 1793 "btrfs: attempt to overwrite %c-block @%llu (%pg/%llu/%d), old(gen=%llu, objectid=%llu, type=%d, offset=%llu), new(gen=%llu), which is referenced by most recent superblock (superblockgen=%llu)!\n", 1794 btrfsic_get_block_type(state, block), bytenr, 1795 dev_state->bdev, dev_bytenr, block->mirror_num, 1796 block->generation, 1797 btrfs_disk_key_objectid(&block->disk_key), 1798 block->disk_key.type, 1799 btrfs_disk_key_offset(&block->disk_key), 1800 btrfs_stack_header_generation( 1801 (struct btrfs_header *) mapped_datav[0]), 1802 state->max_superblock_generation); 1803 btrfsic_dump_tree(state); 1804 } 1805 1806 if (!block->is_iodone && !block->never_written) { 1807 pr_info( 1808 "btrfs: attempt to overwrite %c-block @%llu (%pg/%llu/%d), oldgen=%llu, newgen=%llu, which is not yet iodone!\n", 1809 btrfsic_get_block_type(state, block), bytenr, 1810 dev_state->bdev, dev_bytenr, block->mirror_num, 1811 block->generation, 1812 btrfs_stack_header_generation( 1813 (struct btrfs_header *) 1814 mapped_datav[0])); 1815 /* it would not be safe to go on */ 1816 btrfsic_dump_tree(state); 1817 goto continue_loop; 1818 } 1819 1820 /* 1821 * Clear all references of this block. Do not free 1822 * the block itself even if is not referenced anymore 1823 * because it still carries valuable information 1824 * like whether it was ever written and IO completed. 1825 */ 1826 list_for_each_entry_safe(l, tmp, &block->ref_to_list, 1827 node_ref_to) { 1828 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1829 btrfsic_print_rem_link(state, l); 1830 l->ref_cnt--; 1831 if (0 == l->ref_cnt) { 1832 list_del(&l->node_ref_to); 1833 list_del(&l->node_ref_from); 1834 btrfsic_block_link_hashtable_remove(l); 1835 btrfsic_block_link_free(l); 1836 } 1837 } 1838 1839 block_ctx.dev = dev_state; 1840 block_ctx.dev_bytenr = dev_bytenr; 1841 block_ctx.start = bytenr; 1842 block_ctx.len = processed_len; 1843 block_ctx.pagev = NULL; 1844 block_ctx.mem_to_free = NULL; 1845 block_ctx.datav = mapped_datav; 1846 1847 if (is_metadata || state->include_extent_data) { 1848 block->never_written = 0; 1849 block->iodone_w_error = 0; 1850 if (NULL != bio) { 1851 block->is_iodone = 0; 1852 BUG_ON(NULL == bio_is_patched); 1853 if (!*bio_is_patched) { 1854 block->orig_bio_private = 1855 bio->bi_private; 1856 block->orig_bio_end_io = 1857 bio->bi_end_io; 1858 block->next_in_same_bio = NULL; 1859 bio->bi_private = block; 1860 bio->bi_end_io = btrfsic_bio_end_io; 1861 *bio_is_patched = 1; 1862 } else { 1863 struct btrfsic_block *chained_block = 1864 (struct btrfsic_block *) 1865 bio->bi_private; 1866 1867 BUG_ON(NULL == chained_block); 1868 block->orig_bio_private = 1869 chained_block->orig_bio_private; 1870 block->orig_bio_end_io = 1871 chained_block->orig_bio_end_io; 1872 block->next_in_same_bio = chained_block; 1873 bio->bi_private = block; 1874 } 1875 } else { 1876 block->is_iodone = 1; 1877 block->orig_bio_private = NULL; 1878 block->orig_bio_end_io = NULL; 1879 block->next_in_same_bio = NULL; 1880 } 1881 } 1882 1883 block->flush_gen = dev_state->last_flush_gen + 1; 1884 block->submit_bio_bh_rw = submit_bio_bh_rw; 1885 if (is_metadata) { 1886 block->logical_bytenr = bytenr; 1887 block->is_metadata = 1; 1888 if (block->is_superblock) { 1889 BUG_ON(PAGE_SIZE != 1890 BTRFS_SUPER_INFO_SIZE); 1891 ret = btrfsic_process_written_superblock( 1892 state, 1893 block, 1894 (struct btrfs_super_block *) 1895 mapped_datav[0]); 1896 if (state->print_mask & 1897 BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) { 1898 pr_info("[after new superblock is written]:\n"); 1899 btrfsic_dump_tree_sub(state, block, 0); 1900 } 1901 } else { 1902 block->mirror_num = 0; /* unknown */ 1903 ret = btrfsic_process_metablock( 1904 state, 1905 block, 1906 &block_ctx, 1907 0, 0); 1908 } 1909 if (ret) 1910 pr_info("btrfsic: btrfsic_process_metablock(root @%llu) failed!\n", 1911 dev_bytenr); 1912 } else { 1913 block->is_metadata = 0; 1914 block->mirror_num = 0; /* unknown */ 1915 block->generation = BTRFSIC_GENERATION_UNKNOWN; 1916 if (!state->include_extent_data 1917 && list_empty(&block->ref_from_list)) { 1918 /* 1919 * disk block is overwritten with extent 1920 * data (not meta data) and we are configured 1921 * to not include extent data: take the 1922 * chance and free the block's memory 1923 */ 1924 btrfsic_block_hashtable_remove(block); 1925 list_del(&block->all_blocks_node); 1926 btrfsic_block_free(block); 1927 } 1928 } 1929 btrfsic_release_block_ctx(&block_ctx); 1930 } else { 1931 /* block has not been found in hash table */ 1932 u64 bytenr; 1933 1934 if (!is_metadata) { 1935 processed_len = state->datablock_size; 1936 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1937 pr_info( 1938 "written block (%pg/%llu/?) !found in hash table, D\n", 1939 dev_state->bdev, dev_bytenr); 1940 if (!state->include_extent_data) { 1941 /* ignore that written D block */ 1942 goto continue_loop; 1943 } 1944 1945 /* this is getting ugly for the 1946 * include_extent_data case... */ 1947 bytenr = 0; /* unknown */ 1948 } else { 1949 processed_len = state->metablock_size; 1950 bytenr = btrfs_stack_header_bytenr( 1951 (struct btrfs_header *) 1952 mapped_datav[0]); 1953 btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state, 1954 dev_bytenr); 1955 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1956 pr_info( 1957 "written block @%llu (%pg/%llu/?) !found in hash table, M\n", 1958 bytenr, dev_state->bdev, dev_bytenr); 1959 } 1960 1961 block_ctx.dev = dev_state; 1962 block_ctx.dev_bytenr = dev_bytenr; 1963 block_ctx.start = bytenr; 1964 block_ctx.len = processed_len; 1965 block_ctx.pagev = NULL; 1966 block_ctx.mem_to_free = NULL; 1967 block_ctx.datav = mapped_datav; 1968 1969 block = btrfsic_block_alloc(); 1970 if (NULL == block) { 1971 btrfsic_release_block_ctx(&block_ctx); 1972 goto continue_loop; 1973 } 1974 block->dev_state = dev_state; 1975 block->dev_bytenr = dev_bytenr; 1976 block->logical_bytenr = bytenr; 1977 block->is_metadata = is_metadata; 1978 block->never_written = 0; 1979 block->iodone_w_error = 0; 1980 block->mirror_num = 0; /* unknown */ 1981 block->flush_gen = dev_state->last_flush_gen + 1; 1982 block->submit_bio_bh_rw = submit_bio_bh_rw; 1983 if (NULL != bio) { 1984 block->is_iodone = 0; 1985 BUG_ON(NULL == bio_is_patched); 1986 if (!*bio_is_patched) { 1987 block->orig_bio_private = bio->bi_private; 1988 block->orig_bio_end_io = bio->bi_end_io; 1989 block->next_in_same_bio = NULL; 1990 bio->bi_private = block; 1991 bio->bi_end_io = btrfsic_bio_end_io; 1992 *bio_is_patched = 1; 1993 } else { 1994 struct btrfsic_block *chained_block = 1995 (struct btrfsic_block *) 1996 bio->bi_private; 1997 1998 BUG_ON(NULL == chained_block); 1999 block->orig_bio_private = 2000 chained_block->orig_bio_private; 2001 block->orig_bio_end_io = 2002 chained_block->orig_bio_end_io; 2003 block->next_in_same_bio = chained_block; 2004 bio->bi_private = block; 2005 } 2006 } else { 2007 block->is_iodone = 1; 2008 block->orig_bio_private = NULL; 2009 block->orig_bio_end_io = NULL; 2010 block->next_in_same_bio = NULL; 2011 } 2012 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2013 pr_info("new written %c-block @%llu (%pg/%llu/%d)\n", 2014 is_metadata ? 'M' : 'D', 2015 block->logical_bytenr, block->dev_state->bdev, 2016 block->dev_bytenr, block->mirror_num); 2017 list_add(&block->all_blocks_node, &state->all_blocks_list); 2018 btrfsic_block_hashtable_add(block, &state->block_hashtable); 2019 2020 if (is_metadata) { 2021 ret = btrfsic_process_metablock(state, block, 2022 &block_ctx, 0, 0); 2023 if (ret) 2024 pr_info("btrfsic: process_metablock(root @%llu) failed!\n", 2025 dev_bytenr); 2026 } 2027 btrfsic_release_block_ctx(&block_ctx); 2028 } 2029 2030 continue_loop: 2031 BUG_ON(!processed_len); 2032 dev_bytenr += processed_len; 2033 mapped_datav += processed_len >> PAGE_SHIFT; 2034 num_pages -= processed_len >> PAGE_SHIFT; 2035 goto again; 2036 } 2037 2038 static void btrfsic_bio_end_io(struct bio *bp) 2039 { 2040 struct btrfsic_block *block = bp->bi_private; 2041 int iodone_w_error; 2042 2043 /* mutex is not held! This is not save if IO is not yet completed 2044 * on umount */ 2045 iodone_w_error = 0; 2046 if (bp->bi_status) 2047 iodone_w_error = 1; 2048 2049 BUG_ON(NULL == block); 2050 bp->bi_private = block->orig_bio_private; 2051 bp->bi_end_io = block->orig_bio_end_io; 2052 2053 do { 2054 struct btrfsic_block *next_block; 2055 struct btrfsic_dev_state *const dev_state = block->dev_state; 2056 2057 if ((dev_state->state->print_mask & 2058 BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) 2059 pr_info("bio_end_io(err=%d) for %c @%llu (%pg/%llu/%d)\n", 2060 bp->bi_status, 2061 btrfsic_get_block_type(dev_state->state, block), 2062 block->logical_bytenr, dev_state->bdev, 2063 block->dev_bytenr, block->mirror_num); 2064 next_block = block->next_in_same_bio; 2065 block->iodone_w_error = iodone_w_error; 2066 if (block->submit_bio_bh_rw & REQ_PREFLUSH) { 2067 dev_state->last_flush_gen++; 2068 if ((dev_state->state->print_mask & 2069 BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) 2070 pr_info("bio_end_io() new %pg flush_gen=%llu\n", 2071 dev_state->bdev, 2072 dev_state->last_flush_gen); 2073 } 2074 if (block->submit_bio_bh_rw & REQ_FUA) 2075 block->flush_gen = 0; /* FUA completed means block is 2076 * on disk */ 2077 block->is_iodone = 1; /* for FLUSH, this releases the block */ 2078 block = next_block; 2079 } while (NULL != block); 2080 2081 bp->bi_end_io(bp); 2082 } 2083 2084 static int btrfsic_process_written_superblock( 2085 struct btrfsic_state *state, 2086 struct btrfsic_block *const superblock, 2087 struct btrfs_super_block *const super_hdr) 2088 { 2089 struct btrfs_fs_info *fs_info = state->fs_info; 2090 int pass; 2091 2092 superblock->generation = btrfs_super_generation(super_hdr); 2093 if (!(superblock->generation > state->max_superblock_generation || 2094 0 == state->max_superblock_generation)) { 2095 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 2096 pr_info( 2097 "btrfsic: superblock @%llu (%pg/%llu/%d) with old gen %llu <= %llu\n", 2098 superblock->logical_bytenr, 2099 superblock->dev_state->bdev, 2100 superblock->dev_bytenr, superblock->mirror_num, 2101 btrfs_super_generation(super_hdr), 2102 state->max_superblock_generation); 2103 } else { 2104 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 2105 pr_info( 2106 "btrfsic: got new superblock @%llu (%pg/%llu/%d) with new gen %llu > %llu\n", 2107 superblock->logical_bytenr, 2108 superblock->dev_state->bdev, 2109 superblock->dev_bytenr, superblock->mirror_num, 2110 btrfs_super_generation(super_hdr), 2111 state->max_superblock_generation); 2112 2113 state->max_superblock_generation = 2114 btrfs_super_generation(super_hdr); 2115 state->latest_superblock = superblock; 2116 } 2117 2118 for (pass = 0; pass < 3; pass++) { 2119 int ret; 2120 u64 next_bytenr; 2121 struct btrfsic_block *next_block; 2122 struct btrfsic_block_data_ctx tmp_next_block_ctx; 2123 struct btrfsic_block_link *l; 2124 int num_copies; 2125 int mirror_num; 2126 const char *additional_string = NULL; 2127 struct btrfs_disk_key tmp_disk_key = {0}; 2128 2129 btrfs_set_disk_key_objectid(&tmp_disk_key, 2130 BTRFS_ROOT_ITEM_KEY); 2131 btrfs_set_disk_key_objectid(&tmp_disk_key, 0); 2132 2133 switch (pass) { 2134 case 0: 2135 btrfs_set_disk_key_objectid(&tmp_disk_key, 2136 BTRFS_ROOT_TREE_OBJECTID); 2137 additional_string = "root "; 2138 next_bytenr = btrfs_super_root(super_hdr); 2139 if (state->print_mask & 2140 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 2141 pr_info("root@%llu\n", next_bytenr); 2142 break; 2143 case 1: 2144 btrfs_set_disk_key_objectid(&tmp_disk_key, 2145 BTRFS_CHUNK_TREE_OBJECTID); 2146 additional_string = "chunk "; 2147 next_bytenr = btrfs_super_chunk_root(super_hdr); 2148 if (state->print_mask & 2149 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 2150 pr_info("chunk@%llu\n", next_bytenr); 2151 break; 2152 case 2: 2153 btrfs_set_disk_key_objectid(&tmp_disk_key, 2154 BTRFS_TREE_LOG_OBJECTID); 2155 additional_string = "log "; 2156 next_bytenr = btrfs_super_log_root(super_hdr); 2157 if (0 == next_bytenr) 2158 continue; 2159 if (state->print_mask & 2160 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 2161 pr_info("log@%llu\n", next_bytenr); 2162 break; 2163 } 2164 2165 num_copies = btrfs_num_copies(fs_info, next_bytenr, 2166 BTRFS_SUPER_INFO_SIZE); 2167 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 2168 pr_info("num_copies(log_bytenr=%llu) = %d\n", 2169 next_bytenr, num_copies); 2170 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2171 int was_created; 2172 2173 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2174 pr_info("btrfsic_process_written_superblock(mirror_num=%d)\n", mirror_num); 2175 ret = btrfsic_map_block(state, next_bytenr, 2176 BTRFS_SUPER_INFO_SIZE, 2177 &tmp_next_block_ctx, 2178 mirror_num); 2179 if (ret) { 2180 pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n", 2181 next_bytenr, mirror_num); 2182 return -1; 2183 } 2184 2185 next_block = btrfsic_block_lookup_or_add( 2186 state, 2187 &tmp_next_block_ctx, 2188 additional_string, 2189 1, 0, 1, 2190 mirror_num, 2191 &was_created); 2192 if (NULL == next_block) { 2193 btrfsic_release_block_ctx(&tmp_next_block_ctx); 2194 return -1; 2195 } 2196 2197 next_block->disk_key = tmp_disk_key; 2198 if (was_created) 2199 next_block->generation = 2200 BTRFSIC_GENERATION_UNKNOWN; 2201 l = btrfsic_block_link_lookup_or_add( 2202 state, 2203 &tmp_next_block_ctx, 2204 next_block, 2205 superblock, 2206 BTRFSIC_GENERATION_UNKNOWN); 2207 btrfsic_release_block_ctx(&tmp_next_block_ctx); 2208 if (NULL == l) 2209 return -1; 2210 } 2211 } 2212 2213 if (WARN_ON(-1 == btrfsic_check_all_ref_blocks(state, superblock, 0))) 2214 btrfsic_dump_tree(state); 2215 2216 return 0; 2217 } 2218 2219 static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state, 2220 struct btrfsic_block *const block, 2221 int recursion_level) 2222 { 2223 const struct btrfsic_block_link *l; 2224 int ret = 0; 2225 2226 if (recursion_level >= 3 + BTRFS_MAX_LEVEL) { 2227 /* 2228 * Note that this situation can happen and does not 2229 * indicate an error in regular cases. It happens 2230 * when disk blocks are freed and later reused. 2231 * The check-integrity module is not aware of any 2232 * block free operations, it just recognizes block 2233 * write operations. Therefore it keeps the linkage 2234 * information for a block until a block is 2235 * rewritten. This can temporarily cause incorrect 2236 * and even circular linkage information. This 2237 * causes no harm unless such blocks are referenced 2238 * by the most recent super block. 2239 */ 2240 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2241 pr_info("btrfsic: abort cyclic linkage (case 1).\n"); 2242 2243 return ret; 2244 } 2245 2246 /* 2247 * This algorithm is recursive because the amount of used stack 2248 * space is very small and the max recursion depth is limited. 2249 */ 2250 list_for_each_entry(l, &block->ref_to_list, node_ref_to) { 2251 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2252 pr_info( 2253 "rl=%d, %c @%llu (%pg/%llu/%d) %u* refers to %c @%llu (%pg/%llu/%d)\n", 2254 recursion_level, 2255 btrfsic_get_block_type(state, block), 2256 block->logical_bytenr, block->dev_state->bdev, 2257 block->dev_bytenr, block->mirror_num, 2258 l->ref_cnt, 2259 btrfsic_get_block_type(state, l->block_ref_to), 2260 l->block_ref_to->logical_bytenr, 2261 l->block_ref_to->dev_state->bdev, 2262 l->block_ref_to->dev_bytenr, 2263 l->block_ref_to->mirror_num); 2264 if (l->block_ref_to->never_written) { 2265 pr_info( 2266 "btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) which is never written!\n", 2267 btrfsic_get_block_type(state, l->block_ref_to), 2268 l->block_ref_to->logical_bytenr, 2269 l->block_ref_to->dev_state->bdev, 2270 l->block_ref_to->dev_bytenr, 2271 l->block_ref_to->mirror_num); 2272 ret = -1; 2273 } else if (!l->block_ref_to->is_iodone) { 2274 pr_info( 2275 "btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) which is not yet iodone!\n", 2276 btrfsic_get_block_type(state, l->block_ref_to), 2277 l->block_ref_to->logical_bytenr, 2278 l->block_ref_to->dev_state->bdev, 2279 l->block_ref_to->dev_bytenr, 2280 l->block_ref_to->mirror_num); 2281 ret = -1; 2282 } else if (l->block_ref_to->iodone_w_error) { 2283 pr_info( 2284 "btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) which has write error!\n", 2285 btrfsic_get_block_type(state, l->block_ref_to), 2286 l->block_ref_to->logical_bytenr, 2287 l->block_ref_to->dev_state->bdev, 2288 l->block_ref_to->dev_bytenr, 2289 l->block_ref_to->mirror_num); 2290 ret = -1; 2291 } else if (l->parent_generation != 2292 l->block_ref_to->generation && 2293 BTRFSIC_GENERATION_UNKNOWN != 2294 l->parent_generation && 2295 BTRFSIC_GENERATION_UNKNOWN != 2296 l->block_ref_to->generation) { 2297 pr_info( 2298 "btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) with generation %llu != parent generation %llu!\n", 2299 btrfsic_get_block_type(state, l->block_ref_to), 2300 l->block_ref_to->logical_bytenr, 2301 l->block_ref_to->dev_state->bdev, 2302 l->block_ref_to->dev_bytenr, 2303 l->block_ref_to->mirror_num, 2304 l->block_ref_to->generation, 2305 l->parent_generation); 2306 ret = -1; 2307 } else if (l->block_ref_to->flush_gen > 2308 l->block_ref_to->dev_state->last_flush_gen) { 2309 pr_info( 2310 "btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) which is not flushed out of disk's write cache (block flush_gen=%llu, dev->flush_gen=%llu)!\n", 2311 btrfsic_get_block_type(state, l->block_ref_to), 2312 l->block_ref_to->logical_bytenr, 2313 l->block_ref_to->dev_state->bdev, 2314 l->block_ref_to->dev_bytenr, 2315 l->block_ref_to->mirror_num, block->flush_gen, 2316 l->block_ref_to->dev_state->last_flush_gen); 2317 ret = -1; 2318 } else if (-1 == btrfsic_check_all_ref_blocks(state, 2319 l->block_ref_to, 2320 recursion_level + 2321 1)) { 2322 ret = -1; 2323 } 2324 } 2325 2326 return ret; 2327 } 2328 2329 static int btrfsic_is_block_ref_by_superblock( 2330 const struct btrfsic_state *state, 2331 const struct btrfsic_block *block, 2332 int recursion_level) 2333 { 2334 const struct btrfsic_block_link *l; 2335 2336 if (recursion_level >= 3 + BTRFS_MAX_LEVEL) { 2337 /* refer to comment at "abort cyclic linkage (case 1)" */ 2338 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2339 pr_info("btrfsic: abort cyclic linkage (case 2).\n"); 2340 2341 return 0; 2342 } 2343 2344 /* 2345 * This algorithm is recursive because the amount of used stack space 2346 * is very small and the max recursion depth is limited. 2347 */ 2348 list_for_each_entry(l, &block->ref_from_list, node_ref_from) { 2349 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2350 pr_info( 2351 "rl=%d, %c @%llu (%pg/%llu/%d) is ref %u* from %c @%llu (%pg/%llu/%d)\n", 2352 recursion_level, 2353 btrfsic_get_block_type(state, block), 2354 block->logical_bytenr, block->dev_state->bdev, 2355 block->dev_bytenr, block->mirror_num, 2356 l->ref_cnt, 2357 btrfsic_get_block_type(state, l->block_ref_from), 2358 l->block_ref_from->logical_bytenr, 2359 l->block_ref_from->dev_state->bdev, 2360 l->block_ref_from->dev_bytenr, 2361 l->block_ref_from->mirror_num); 2362 if (l->block_ref_from->is_superblock && 2363 state->latest_superblock->dev_bytenr == 2364 l->block_ref_from->dev_bytenr && 2365 state->latest_superblock->dev_state->bdev == 2366 l->block_ref_from->dev_state->bdev) 2367 return 1; 2368 else if (btrfsic_is_block_ref_by_superblock(state, 2369 l->block_ref_from, 2370 recursion_level + 2371 1)) 2372 return 1; 2373 } 2374 2375 return 0; 2376 } 2377 2378 static void btrfsic_print_add_link(const struct btrfsic_state *state, 2379 const struct btrfsic_block_link *l) 2380 { 2381 pr_info("add %u* link from %c @%llu (%pg/%llu/%d) to %c @%llu (%pg/%llu/%d)\n", 2382 l->ref_cnt, 2383 btrfsic_get_block_type(state, l->block_ref_from), 2384 l->block_ref_from->logical_bytenr, 2385 l->block_ref_from->dev_state->bdev, 2386 l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num, 2387 btrfsic_get_block_type(state, l->block_ref_to), 2388 l->block_ref_to->logical_bytenr, 2389 l->block_ref_to->dev_state->bdev, l->block_ref_to->dev_bytenr, 2390 l->block_ref_to->mirror_num); 2391 } 2392 2393 static void btrfsic_print_rem_link(const struct btrfsic_state *state, 2394 const struct btrfsic_block_link *l) 2395 { 2396 pr_info("rem %u* link from %c @%llu (%pg/%llu/%d) to %c @%llu (%pg/%llu/%d)\n", 2397 l->ref_cnt, 2398 btrfsic_get_block_type(state, l->block_ref_from), 2399 l->block_ref_from->logical_bytenr, 2400 l->block_ref_from->dev_state->bdev, 2401 l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num, 2402 btrfsic_get_block_type(state, l->block_ref_to), 2403 l->block_ref_to->logical_bytenr, 2404 l->block_ref_to->dev_state->bdev, l->block_ref_to->dev_bytenr, 2405 l->block_ref_to->mirror_num); 2406 } 2407 2408 static char btrfsic_get_block_type(const struct btrfsic_state *state, 2409 const struct btrfsic_block *block) 2410 { 2411 if (block->is_superblock && 2412 state->latest_superblock->dev_bytenr == block->dev_bytenr && 2413 state->latest_superblock->dev_state->bdev == block->dev_state->bdev) 2414 return 'S'; 2415 else if (block->is_superblock) 2416 return 's'; 2417 else if (block->is_metadata) 2418 return 'M'; 2419 else 2420 return 'D'; 2421 } 2422 2423 static void btrfsic_dump_tree(const struct btrfsic_state *state) 2424 { 2425 btrfsic_dump_tree_sub(state, state->latest_superblock, 0); 2426 } 2427 2428 static void btrfsic_dump_tree_sub(const struct btrfsic_state *state, 2429 const struct btrfsic_block *block, 2430 int indent_level) 2431 { 2432 const struct btrfsic_block_link *l; 2433 int indent_add; 2434 static char buf[80]; 2435 int cursor_position; 2436 2437 /* 2438 * Should better fill an on-stack buffer with a complete line and 2439 * dump it at once when it is time to print a newline character. 2440 */ 2441 2442 /* 2443 * This algorithm is recursive because the amount of used stack space 2444 * is very small and the max recursion depth is limited. 2445 */ 2446 indent_add = sprintf(buf, "%c-%llu(%pg/%llu/%u)", 2447 btrfsic_get_block_type(state, block), 2448 block->logical_bytenr, block->dev_state->bdev, 2449 block->dev_bytenr, block->mirror_num); 2450 if (indent_level + indent_add > BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) { 2451 printk("[...]\n"); 2452 return; 2453 } 2454 printk(buf); 2455 indent_level += indent_add; 2456 if (list_empty(&block->ref_to_list)) { 2457 printk("\n"); 2458 return; 2459 } 2460 if (block->mirror_num > 1 && 2461 !(state->print_mask & BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS)) { 2462 printk(" [...]\n"); 2463 return; 2464 } 2465 2466 cursor_position = indent_level; 2467 list_for_each_entry(l, &block->ref_to_list, node_ref_to) { 2468 while (cursor_position < indent_level) { 2469 printk(" "); 2470 cursor_position++; 2471 } 2472 if (l->ref_cnt > 1) 2473 indent_add = sprintf(buf, " %d*--> ", l->ref_cnt); 2474 else 2475 indent_add = sprintf(buf, " --> "); 2476 if (indent_level + indent_add > 2477 BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) { 2478 printk("[...]\n"); 2479 cursor_position = 0; 2480 continue; 2481 } 2482 2483 printk(buf); 2484 2485 btrfsic_dump_tree_sub(state, l->block_ref_to, 2486 indent_level + indent_add); 2487 cursor_position = 0; 2488 } 2489 } 2490 2491 static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add( 2492 struct btrfsic_state *state, 2493 struct btrfsic_block_data_ctx *next_block_ctx, 2494 struct btrfsic_block *next_block, 2495 struct btrfsic_block *from_block, 2496 u64 parent_generation) 2497 { 2498 struct btrfsic_block_link *l; 2499 2500 l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev, 2501 next_block_ctx->dev_bytenr, 2502 from_block->dev_state->bdev, 2503 from_block->dev_bytenr, 2504 &state->block_link_hashtable); 2505 if (NULL == l) { 2506 l = btrfsic_block_link_alloc(); 2507 if (!l) 2508 return NULL; 2509 2510 l->block_ref_to = next_block; 2511 l->block_ref_from = from_block; 2512 l->ref_cnt = 1; 2513 l->parent_generation = parent_generation; 2514 2515 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2516 btrfsic_print_add_link(state, l); 2517 2518 list_add(&l->node_ref_to, &from_block->ref_to_list); 2519 list_add(&l->node_ref_from, &next_block->ref_from_list); 2520 2521 btrfsic_block_link_hashtable_add(l, 2522 &state->block_link_hashtable); 2523 } else { 2524 l->ref_cnt++; 2525 l->parent_generation = parent_generation; 2526 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2527 btrfsic_print_add_link(state, l); 2528 } 2529 2530 return l; 2531 } 2532 2533 static struct btrfsic_block *btrfsic_block_lookup_or_add( 2534 struct btrfsic_state *state, 2535 struct btrfsic_block_data_ctx *block_ctx, 2536 const char *additional_string, 2537 int is_metadata, 2538 int is_iodone, 2539 int never_written, 2540 int mirror_num, 2541 int *was_created) 2542 { 2543 struct btrfsic_block *block; 2544 2545 block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev, 2546 block_ctx->dev_bytenr, 2547 &state->block_hashtable); 2548 if (NULL == block) { 2549 struct btrfsic_dev_state *dev_state; 2550 2551 block = btrfsic_block_alloc(); 2552 if (!block) 2553 return NULL; 2554 2555 dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev->bd_dev); 2556 if (NULL == dev_state) { 2557 pr_info("btrfsic: error, lookup dev_state failed!\n"); 2558 btrfsic_block_free(block); 2559 return NULL; 2560 } 2561 block->dev_state = dev_state; 2562 block->dev_bytenr = block_ctx->dev_bytenr; 2563 block->logical_bytenr = block_ctx->start; 2564 block->is_metadata = is_metadata; 2565 block->is_iodone = is_iodone; 2566 block->never_written = never_written; 2567 block->mirror_num = mirror_num; 2568 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2569 pr_info("New %s%c-block @%llu (%pg/%llu/%d)\n", 2570 additional_string, 2571 btrfsic_get_block_type(state, block), 2572 block->logical_bytenr, dev_state->bdev, 2573 block->dev_bytenr, mirror_num); 2574 list_add(&block->all_blocks_node, &state->all_blocks_list); 2575 btrfsic_block_hashtable_add(block, &state->block_hashtable); 2576 if (NULL != was_created) 2577 *was_created = 1; 2578 } else { 2579 if (NULL != was_created) 2580 *was_created = 0; 2581 } 2582 2583 return block; 2584 } 2585 2586 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state, 2587 u64 bytenr, 2588 struct btrfsic_dev_state *dev_state, 2589 u64 dev_bytenr) 2590 { 2591 struct btrfs_fs_info *fs_info = state->fs_info; 2592 struct btrfsic_block_data_ctx block_ctx; 2593 int num_copies; 2594 int mirror_num; 2595 int match = 0; 2596 int ret; 2597 2598 num_copies = btrfs_num_copies(fs_info, bytenr, state->metablock_size); 2599 2600 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2601 ret = btrfsic_map_block(state, bytenr, state->metablock_size, 2602 &block_ctx, mirror_num); 2603 if (ret) { 2604 pr_info("btrfsic: btrfsic_map_block(logical @%llu, mirror %d) failed!\n", 2605 bytenr, mirror_num); 2606 continue; 2607 } 2608 2609 if (dev_state->bdev == block_ctx.dev->bdev && 2610 dev_bytenr == block_ctx.dev_bytenr) { 2611 match++; 2612 btrfsic_release_block_ctx(&block_ctx); 2613 break; 2614 } 2615 btrfsic_release_block_ctx(&block_ctx); 2616 } 2617 2618 if (WARN_ON(!match)) { 2619 pr_info( 2620 "btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio, buffer->log_bytenr=%llu, submit_bio(bdev=%pg, phys_bytenr=%llu)!\n", 2621 bytenr, dev_state->bdev, dev_bytenr); 2622 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2623 ret = btrfsic_map_block(state, bytenr, 2624 state->metablock_size, 2625 &block_ctx, mirror_num); 2626 if (ret) 2627 continue; 2628 2629 pr_info("read logical bytenr @%llu maps to (%pg/%llu/%d)\n", 2630 bytenr, block_ctx.dev->bdev, 2631 block_ctx.dev_bytenr, mirror_num); 2632 } 2633 } 2634 } 2635 2636 static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t dev) 2637 { 2638 return btrfsic_dev_state_hashtable_lookup(dev, 2639 &btrfsic_dev_state_hashtable); 2640 } 2641 2642 static void btrfsic_check_write_bio(struct bio *bio, struct btrfsic_dev_state *dev_state) 2643 { 2644 unsigned int segs = bio_segments(bio); 2645 u64 dev_bytenr = 512 * bio->bi_iter.bi_sector; 2646 u64 cur_bytenr = dev_bytenr; 2647 struct bvec_iter iter; 2648 struct bio_vec bvec; 2649 char **mapped_datav; 2650 int bio_is_patched = 0; 2651 int i = 0; 2652 2653 if (dev_state->state->print_mask & BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2654 pr_info( 2655 "submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", 2656 bio_op(bio), bio->bi_opf, segs, 2657 bio->bi_iter.bi_sector, dev_bytenr, bio->bi_bdev); 2658 2659 mapped_datav = kmalloc_array(segs, sizeof(*mapped_datav), GFP_NOFS); 2660 if (!mapped_datav) 2661 return; 2662 2663 bio_for_each_segment(bvec, bio, iter) { 2664 BUG_ON(bvec.bv_len != PAGE_SIZE); 2665 mapped_datav[i] = page_address(bvec.bv_page); 2666 i++; 2667 2668 if (dev_state->state->print_mask & 2669 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE) 2670 pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n", 2671 i, cur_bytenr, bvec.bv_len, bvec.bv_offset); 2672 cur_bytenr += bvec.bv_len; 2673 } 2674 2675 btrfsic_process_written_block(dev_state, dev_bytenr, mapped_datav, segs, 2676 bio, &bio_is_patched, bio->bi_opf); 2677 kfree(mapped_datav); 2678 } 2679 2680 static void btrfsic_check_flush_bio(struct bio *bio, struct btrfsic_dev_state *dev_state) 2681 { 2682 if (dev_state->state->print_mask & BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2683 pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n", 2684 bio_op(bio), bio->bi_opf, bio->bi_bdev); 2685 2686 if (dev_state->dummy_block_for_bio_bh_flush.is_iodone) { 2687 struct btrfsic_block *const block = 2688 &dev_state->dummy_block_for_bio_bh_flush; 2689 2690 block->is_iodone = 0; 2691 block->never_written = 0; 2692 block->iodone_w_error = 0; 2693 block->flush_gen = dev_state->last_flush_gen + 1; 2694 block->submit_bio_bh_rw = bio->bi_opf; 2695 block->orig_bio_private = bio->bi_private; 2696 block->orig_bio_end_io = bio->bi_end_io; 2697 block->next_in_same_bio = NULL; 2698 bio->bi_private = block; 2699 bio->bi_end_io = btrfsic_bio_end_io; 2700 } else if ((dev_state->state->print_mask & 2701 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | 2702 BTRFSIC_PRINT_MASK_VERBOSE))) { 2703 pr_info( 2704 "btrfsic_submit_bio(%pg) with FLUSH but dummy block already in use (ignored)!\n", 2705 dev_state->bdev); 2706 } 2707 } 2708 2709 void btrfsic_check_bio(struct bio *bio) 2710 { 2711 struct btrfsic_dev_state *dev_state; 2712 2713 if (!btrfsic_is_initialized) 2714 return; 2715 2716 /* 2717 * We can be called before btrfsic_mount, so there might not be a 2718 * dev_state. 2719 */ 2720 dev_state = btrfsic_dev_state_lookup(bio->bi_bdev->bd_dev); 2721 mutex_lock(&btrfsic_mutex); 2722 if (dev_state) { 2723 if (bio_op(bio) == REQ_OP_WRITE && bio_has_data(bio)) 2724 btrfsic_check_write_bio(bio, dev_state); 2725 else if (bio->bi_opf & REQ_PREFLUSH) 2726 btrfsic_check_flush_bio(bio, dev_state); 2727 } 2728 mutex_unlock(&btrfsic_mutex); 2729 } 2730 2731 int btrfsic_mount(struct btrfs_fs_info *fs_info, 2732 struct btrfs_fs_devices *fs_devices, 2733 int including_extent_data, u32 print_mask) 2734 { 2735 int ret; 2736 struct btrfsic_state *state; 2737 struct list_head *dev_head = &fs_devices->devices; 2738 struct btrfs_device *device; 2739 2740 if (!PAGE_ALIGNED(fs_info->nodesize)) { 2741 pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n", 2742 fs_info->nodesize, PAGE_SIZE); 2743 return -1; 2744 } 2745 if (!PAGE_ALIGNED(fs_info->sectorsize)) { 2746 pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n", 2747 fs_info->sectorsize, PAGE_SIZE); 2748 return -1; 2749 } 2750 state = kvzalloc(sizeof(*state), GFP_KERNEL); 2751 if (!state) 2752 return -ENOMEM; 2753 2754 if (!btrfsic_is_initialized) { 2755 mutex_init(&btrfsic_mutex); 2756 btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable); 2757 btrfsic_is_initialized = 1; 2758 } 2759 mutex_lock(&btrfsic_mutex); 2760 state->fs_info = fs_info; 2761 state->print_mask = print_mask; 2762 state->include_extent_data = including_extent_data; 2763 state->metablock_size = fs_info->nodesize; 2764 state->datablock_size = fs_info->sectorsize; 2765 INIT_LIST_HEAD(&state->all_blocks_list); 2766 btrfsic_block_hashtable_init(&state->block_hashtable); 2767 btrfsic_block_link_hashtable_init(&state->block_link_hashtable); 2768 state->max_superblock_generation = 0; 2769 state->latest_superblock = NULL; 2770 2771 list_for_each_entry(device, dev_head, dev_list) { 2772 struct btrfsic_dev_state *ds; 2773 2774 if (!device->bdev || !device->name) 2775 continue; 2776 2777 ds = btrfsic_dev_state_alloc(); 2778 if (NULL == ds) { 2779 mutex_unlock(&btrfsic_mutex); 2780 return -ENOMEM; 2781 } 2782 ds->bdev = device->bdev; 2783 ds->state = state; 2784 btrfsic_dev_state_hashtable_add(ds, 2785 &btrfsic_dev_state_hashtable); 2786 } 2787 2788 ret = btrfsic_process_superblock(state, fs_devices); 2789 if (0 != ret) { 2790 mutex_unlock(&btrfsic_mutex); 2791 btrfsic_unmount(fs_devices); 2792 return ret; 2793 } 2794 2795 if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_DATABASE) 2796 btrfsic_dump_database(state); 2797 if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_TREE) 2798 btrfsic_dump_tree(state); 2799 2800 mutex_unlock(&btrfsic_mutex); 2801 return 0; 2802 } 2803 2804 void btrfsic_unmount(struct btrfs_fs_devices *fs_devices) 2805 { 2806 struct btrfsic_block *b_all, *tmp_all; 2807 struct btrfsic_state *state; 2808 struct list_head *dev_head = &fs_devices->devices; 2809 struct btrfs_device *device; 2810 2811 if (!btrfsic_is_initialized) 2812 return; 2813 2814 mutex_lock(&btrfsic_mutex); 2815 2816 state = NULL; 2817 list_for_each_entry(device, dev_head, dev_list) { 2818 struct btrfsic_dev_state *ds; 2819 2820 if (!device->bdev || !device->name) 2821 continue; 2822 2823 ds = btrfsic_dev_state_hashtable_lookup( 2824 device->bdev->bd_dev, 2825 &btrfsic_dev_state_hashtable); 2826 if (NULL != ds) { 2827 state = ds->state; 2828 btrfsic_dev_state_hashtable_remove(ds); 2829 btrfsic_dev_state_free(ds); 2830 } 2831 } 2832 2833 if (NULL == state) { 2834 pr_info("btrfsic: error, cannot find state information on umount!\n"); 2835 mutex_unlock(&btrfsic_mutex); 2836 return; 2837 } 2838 2839 /* 2840 * Don't care about keeping the lists' state up to date, 2841 * just free all memory that was allocated dynamically. 2842 * Free the blocks and the block_links. 2843 */ 2844 list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list, 2845 all_blocks_node) { 2846 struct btrfsic_block_link *l, *tmp; 2847 2848 list_for_each_entry_safe(l, tmp, &b_all->ref_to_list, 2849 node_ref_to) { 2850 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2851 btrfsic_print_rem_link(state, l); 2852 2853 l->ref_cnt--; 2854 if (0 == l->ref_cnt) 2855 btrfsic_block_link_free(l); 2856 } 2857 2858 if (b_all->is_iodone || b_all->never_written) 2859 btrfsic_block_free(b_all); 2860 else 2861 pr_info( 2862 "btrfs: attempt to free %c-block @%llu (%pg/%llu/%d) on umount which is not yet iodone!\n", 2863 btrfsic_get_block_type(state, b_all), 2864 b_all->logical_bytenr, b_all->dev_state->bdev, 2865 b_all->dev_bytenr, b_all->mirror_num); 2866 } 2867 2868 mutex_unlock(&btrfsic_mutex); 2869 2870 kvfree(state); 2871 } 2872