1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) STRATO AG 2011. All rights reserved. 4 */ 5 6 /* 7 * This module can be used to catch cases when the btrfs kernel 8 * code executes write requests to the disk that bring the file 9 * system in an inconsistent state. In such a state, a power-loss 10 * or kernel panic event would cause that the data on disk is 11 * lost or at least damaged. 12 * 13 * Code is added that examines all block write requests during 14 * runtime (including writes of the super block). Three rules 15 * are verified and an error is printed on violation of the 16 * rules: 17 * 1. It is not allowed to write a disk block which is 18 * currently referenced by the super block (either directly 19 * or indirectly). 20 * 2. When a super block is written, it is verified that all 21 * referenced (directly or indirectly) blocks fulfill the 22 * following requirements: 23 * 2a. All referenced blocks have either been present when 24 * the file system was mounted, (i.e., they have been 25 * referenced by the super block) or they have been 26 * written since then and the write completion callback 27 * was called and no write error was indicated and a 28 * FLUSH request to the device where these blocks are 29 * located was received and completed. 30 * 2b. All referenced blocks need to have a generation 31 * number which is equal to the parent's number. 32 * 33 * One issue that was found using this module was that the log 34 * tree on disk became temporarily corrupted because disk blocks 35 * that had been in use for the log tree had been freed and 36 * reused too early, while being referenced by the written super 37 * block. 38 * 39 * The search term in the kernel log that can be used to filter 40 * on the existence of detected integrity issues is 41 * "btrfs: attempt". 42 * 43 * The integrity check is enabled via mount options. These 44 * mount options are only supported if the integrity check 45 * tool is compiled by defining BTRFS_FS_CHECK_INTEGRITY. 46 * 47 * Example #1, apply integrity checks to all metadata: 48 * mount /dev/sdb1 /mnt -o check_int 49 * 50 * Example #2, apply integrity checks to all metadata and 51 * to data extents: 52 * mount /dev/sdb1 /mnt -o check_int_data 53 * 54 * Example #3, apply integrity checks to all metadata and dump 55 * the tree that the super block references to kernel messages 56 * each time after a super block was written: 57 * mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263 58 * 59 * If the integrity check tool is included and activated in 60 * the mount options, plenty of kernel memory is used, and 61 * plenty of additional CPU cycles are spent. Enabling this 62 * functionality is not intended for normal use. In most 63 * cases, unless you are a btrfs developer who needs to verify 64 * the integrity of (super)-block write requests, do not 65 * enable the config option BTRFS_FS_CHECK_INTEGRITY to 66 * include and compile the integrity check tool. 67 * 68 * Expect millions of lines of information in the kernel log with an 69 * enabled check_int_print_mask. Therefore set LOG_BUF_SHIFT in the 70 * kernel config to at least 26 (which is 64MB). Usually the value is 71 * limited to 21 (which is 2MB) in init/Kconfig. 
The file needs to be 72 * changed like this before LOG_BUF_SHIFT can be set to a high value: 73 * config LOG_BUF_SHIFT 74 * int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" 75 * range 12 30 76 */ 77 78 #include <linux/sched.h> 79 #include <linux/slab.h> 80 #include <linux/mutex.h> 81 #include <linux/genhd.h> 82 #include <linux/blkdev.h> 83 #include <linux/mm.h> 84 #include <linux/string.h> 85 #include <crypto/hash.h> 86 #include "ctree.h" 87 #include "disk-io.h" 88 #include "transaction.h" 89 #include "extent_io.h" 90 #include "volumes.h" 91 #include "print-tree.h" 92 #include "locking.h" 93 #include "check-integrity.h" 94 #include "rcu-string.h" 95 #include "compression.h" 96 97 #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 98 #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 99 #define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100 100 #define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051 101 #define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807 102 #define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530 103 #define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300 104 #define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6) /* in characters, 105 * excluding " [...]" */ 106 #define BTRFSIC_GENERATION_UNKNOWN ((u64)-1) 107 108 /* 109 * The definition of the bitmask fields for the print_mask. 110 * They are specified with the mount option check_integrity_print_mask. 111 */ 112 #define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE 0x00000001 113 #define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION 0x00000002 114 #define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE 0x00000004 115 #define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE 0x00000008 116 #define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH 0x00000010 117 #define BTRFSIC_PRINT_MASK_END_IO_BIO_BH 0x00000020 118 #define BTRFSIC_PRINT_MASK_VERBOSE 0x00000040 119 #define BTRFSIC_PRINT_MASK_VERY_VERBOSE 0x00000080 120 #define BTRFSIC_PRINT_MASK_INITIAL_TREE 0x00000100 121 #define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES 0x00000200 122 #define BTRFSIC_PRINT_MASK_INITIAL_DATABASE 0x00000400 123 #define BTRFSIC_PRINT_MASK_NUM_COPIES 0x00000800 124 #define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS 0x00001000 125 #define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE 0x00002000 126 127 struct btrfsic_dev_state; 128 struct btrfsic_state; 129 130 struct btrfsic_block { 131 u32 magic_num; /* only used for debug purposes */ 132 unsigned int is_metadata:1; /* if it is meta-data, not data-data */ 133 unsigned int is_superblock:1; /* if it is one of the superblocks */ 134 unsigned int is_iodone:1; /* if is done by lower subsystem */ 135 unsigned int iodone_w_error:1; /* error was indicated to endio */ 136 unsigned int never_written:1; /* block was added because it was 137 * referenced, not because it was 138 * written */ 139 unsigned int mirror_num; /* large enough to hold 140 * BTRFS_SUPER_MIRROR_MAX */ 141 struct btrfsic_dev_state *dev_state; 142 u64 dev_bytenr; /* key, physical byte num on disk */ 143 u64 logical_bytenr; /* logical byte num on disk */ 144 u64 generation; 145 struct btrfs_disk_key disk_key; /* extra info to print in case of 146 * issues, will not always be correct */ 147 struct list_head collision_resolving_node; /* list node */ 148 struct list_head all_blocks_node; /* list node */ 149 150 /* the following two lists contain block_link items */ 151 struct list_head ref_to_list; /* list */ 152 struct list_head ref_from_list; /* list */ 153 struct btrfsic_block *next_in_same_bio; 154 void *orig_bio_private; 155 bio_end_io_t *orig_bio_end_io; 156 int submit_bio_bh_rw; 157 u64 flush_gen; /* only valid 
if !never_written */ 158 }; 159 160 /* 161 * Elements of this type are allocated dynamically and required because 162 * each block object can refer to and can be ref from multiple blocks. 163 * The key to lookup them in the hashtable is the dev_bytenr of 164 * the block ref to plus the one from the block referred from. 165 * The fact that they are searchable via a hashtable and that a 166 * ref_cnt is maintained is not required for the btrfs integrity 167 * check algorithm itself, it is only used to make the output more 168 * beautiful in case that an error is detected (an error is defined 169 * as a write operation to a block while that block is still referenced). 170 */ 171 struct btrfsic_block_link { 172 u32 magic_num; /* only used for debug purposes */ 173 u32 ref_cnt; 174 struct list_head node_ref_to; /* list node */ 175 struct list_head node_ref_from; /* list node */ 176 struct list_head collision_resolving_node; /* list node */ 177 struct btrfsic_block *block_ref_to; 178 struct btrfsic_block *block_ref_from; 179 u64 parent_generation; 180 }; 181 182 struct btrfsic_dev_state { 183 u32 magic_num; /* only used for debug purposes */ 184 struct block_device *bdev; 185 struct btrfsic_state *state; 186 struct list_head collision_resolving_node; /* list node */ 187 struct btrfsic_block dummy_block_for_bio_bh_flush; 188 u64 last_flush_gen; 189 char name[BDEVNAME_SIZE]; 190 }; 191 192 struct btrfsic_block_hashtable { 193 struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE]; 194 }; 195 196 struct btrfsic_block_link_hashtable { 197 struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE]; 198 }; 199 200 struct btrfsic_dev_state_hashtable { 201 struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE]; 202 }; 203 204 struct btrfsic_block_data_ctx { 205 u64 start; /* virtual bytenr */ 206 u64 dev_bytenr; /* physical bytenr on device */ 207 u32 len; 208 struct btrfsic_dev_state *dev; 209 char **datav; 210 struct page **pagev; 211 void *mem_to_free; 212 }; 213 214 /* This structure is used to implement recursion without occupying 215 * any stack space, refer to btrfsic_process_metablock() */ 216 struct btrfsic_stack_frame { 217 u32 magic; 218 u32 nr; 219 int error; 220 int i; 221 int limit_nesting; 222 int num_copies; 223 int mirror_num; 224 struct btrfsic_block *block; 225 struct btrfsic_block_data_ctx *block_ctx; 226 struct btrfsic_block *next_block; 227 struct btrfsic_block_data_ctx next_block_ctx; 228 struct btrfs_header *hdr; 229 struct btrfsic_stack_frame *prev; 230 }; 231 232 /* Some state per mounted filesystem */ 233 struct btrfsic_state { 234 u32 print_mask; 235 int include_extent_data; 236 struct list_head all_blocks_list; 237 struct btrfsic_block_hashtable block_hashtable; 238 struct btrfsic_block_link_hashtable block_link_hashtable; 239 struct btrfs_fs_info *fs_info; 240 u64 max_superblock_generation; 241 struct btrfsic_block *latest_superblock; 242 u32 metablock_size; 243 u32 datablock_size; 244 }; 245 246 static int btrfsic_process_metablock(struct btrfsic_state *state, 247 struct btrfsic_block *block, 248 struct btrfsic_block_data_ctx *block_ctx, 249 int limit_nesting, int force_iodone_flag); 250 static void btrfsic_read_from_block_data( 251 struct btrfsic_block_data_ctx *block_ctx, 252 void *dst, u32 offset, size_t len); 253 static int btrfsic_create_link_to_next_block( 254 struct btrfsic_state *state, 255 struct btrfsic_block *block, 256 struct btrfsic_block_data_ctx 257 *block_ctx, u64 next_bytenr, 258 int limit_nesting, 259 struct btrfsic_block_data_ctx *next_block_ctx, 260 
struct btrfsic_block **next_blockp, 261 int force_iodone_flag, 262 int *num_copiesp, int *mirror_nump, 263 struct btrfs_disk_key *disk_key, 264 u64 parent_generation); 265 static int btrfsic_handle_extent_data(struct btrfsic_state *state, 266 struct btrfsic_block *block, 267 struct btrfsic_block_data_ctx *block_ctx, 268 u32 item_offset, int force_iodone_flag); 269 static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len, 270 struct btrfsic_block_data_ctx *block_ctx_out, 271 int mirror_num); 272 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx); 273 static int btrfsic_read_block(struct btrfsic_state *state, 274 struct btrfsic_block_data_ctx *block_ctx); 275 static int btrfsic_process_written_superblock( 276 struct btrfsic_state *state, 277 struct btrfsic_block *const block, 278 struct btrfs_super_block *const super_hdr); 279 static void btrfsic_bio_end_io(struct bio *bp); 280 static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state, 281 const struct btrfsic_block *block, 282 int recursion_level); 283 static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state, 284 struct btrfsic_block *const block, 285 int recursion_level); 286 static void btrfsic_print_add_link(const struct btrfsic_state *state, 287 const struct btrfsic_block_link *l); 288 static void btrfsic_print_rem_link(const struct btrfsic_state *state, 289 const struct btrfsic_block_link *l); 290 static char btrfsic_get_block_type(const struct btrfsic_state *state, 291 const struct btrfsic_block *block); 292 static void btrfsic_dump_tree(const struct btrfsic_state *state); 293 static void btrfsic_dump_tree_sub(const struct btrfsic_state *state, 294 const struct btrfsic_block *block, 295 int indent_level); 296 static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add( 297 struct btrfsic_state *state, 298 struct btrfsic_block_data_ctx *next_block_ctx, 299 struct btrfsic_block *next_block, 300 struct btrfsic_block *from_block, 301 u64 parent_generation); 302 static struct btrfsic_block *btrfsic_block_lookup_or_add( 303 struct btrfsic_state *state, 304 struct btrfsic_block_data_ctx *block_ctx, 305 const char *additional_string, 306 int is_metadata, 307 int is_iodone, 308 int never_written, 309 int mirror_num, 310 int *was_created); 311 static int btrfsic_process_superblock_dev_mirror( 312 struct btrfsic_state *state, 313 struct btrfsic_dev_state *dev_state, 314 struct btrfs_device *device, 315 int superblock_mirror_num, 316 struct btrfsic_dev_state **selected_dev_state, 317 struct btrfs_super_block *selected_super); 318 static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t dev); 319 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state, 320 u64 bytenr, 321 struct btrfsic_dev_state *dev_state, 322 u64 dev_bytenr); 323 324 static struct mutex btrfsic_mutex; 325 static int btrfsic_is_initialized; 326 static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable; 327 328 329 static void btrfsic_block_init(struct btrfsic_block *b) 330 { 331 b->magic_num = BTRFSIC_BLOCK_MAGIC_NUMBER; 332 b->dev_state = NULL; 333 b->dev_bytenr = 0; 334 b->logical_bytenr = 0; 335 b->generation = BTRFSIC_GENERATION_UNKNOWN; 336 b->disk_key.objectid = 0; 337 b->disk_key.type = 0; 338 b->disk_key.offset = 0; 339 b->is_metadata = 0; 340 b->is_superblock = 0; 341 b->is_iodone = 0; 342 b->iodone_w_error = 0; 343 b->never_written = 0; 344 b->mirror_num = 0; 345 b->next_in_same_bio = NULL; 346 b->orig_bio_private = NULL; 347 b->orig_bio_end_io = 
NULL; 348 INIT_LIST_HEAD(&b->collision_resolving_node); 349 INIT_LIST_HEAD(&b->all_blocks_node); 350 INIT_LIST_HEAD(&b->ref_to_list); 351 INIT_LIST_HEAD(&b->ref_from_list); 352 b->submit_bio_bh_rw = 0; 353 b->flush_gen = 0; 354 } 355 356 static struct btrfsic_block *btrfsic_block_alloc(void) 357 { 358 struct btrfsic_block *b; 359 360 b = kzalloc(sizeof(*b), GFP_NOFS); 361 if (NULL != b) 362 btrfsic_block_init(b); 363 364 return b; 365 } 366 367 static void btrfsic_block_free(struct btrfsic_block *b) 368 { 369 BUG_ON(!(NULL == b || BTRFSIC_BLOCK_MAGIC_NUMBER == b->magic_num)); 370 kfree(b); 371 } 372 373 static void btrfsic_block_link_init(struct btrfsic_block_link *l) 374 { 375 l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER; 376 l->ref_cnt = 1; 377 INIT_LIST_HEAD(&l->node_ref_to); 378 INIT_LIST_HEAD(&l->node_ref_from); 379 INIT_LIST_HEAD(&l->collision_resolving_node); 380 l->block_ref_to = NULL; 381 l->block_ref_from = NULL; 382 } 383 384 static struct btrfsic_block_link *btrfsic_block_link_alloc(void) 385 { 386 struct btrfsic_block_link *l; 387 388 l = kzalloc(sizeof(*l), GFP_NOFS); 389 if (NULL != l) 390 btrfsic_block_link_init(l); 391 392 return l; 393 } 394 395 static void btrfsic_block_link_free(struct btrfsic_block_link *l) 396 { 397 BUG_ON(!(NULL == l || BTRFSIC_BLOCK_LINK_MAGIC_NUMBER == l->magic_num)); 398 kfree(l); 399 } 400 401 static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds) 402 { 403 ds->magic_num = BTRFSIC_DEV2STATE_MAGIC_NUMBER; 404 ds->bdev = NULL; 405 ds->state = NULL; 406 ds->name[0] = '\0'; 407 INIT_LIST_HEAD(&ds->collision_resolving_node); 408 ds->last_flush_gen = 0; 409 btrfsic_block_init(&ds->dummy_block_for_bio_bh_flush); 410 ds->dummy_block_for_bio_bh_flush.is_iodone = 1; 411 ds->dummy_block_for_bio_bh_flush.dev_state = ds; 412 } 413 414 static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void) 415 { 416 struct btrfsic_dev_state *ds; 417 418 ds = kzalloc(sizeof(*ds), GFP_NOFS); 419 if (NULL != ds) 420 btrfsic_dev_state_init(ds); 421 422 return ds; 423 } 424 425 static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds) 426 { 427 BUG_ON(!(NULL == ds || 428 BTRFSIC_DEV2STATE_MAGIC_NUMBER == ds->magic_num)); 429 kfree(ds); 430 } 431 432 static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h) 433 { 434 int i; 435 436 for (i = 0; i < BTRFSIC_BLOCK_HASHTABLE_SIZE; i++) 437 INIT_LIST_HEAD(h->table + i); 438 } 439 440 static void btrfsic_block_hashtable_add(struct btrfsic_block *b, 441 struct btrfsic_block_hashtable *h) 442 { 443 const unsigned int hashval = 444 (((unsigned int)(b->dev_bytenr >> 16)) ^ 445 ((unsigned int)((uintptr_t)b->dev_state->bdev))) & 446 (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1); 447 448 list_add(&b->collision_resolving_node, h->table + hashval); 449 } 450 451 static void btrfsic_block_hashtable_remove(struct btrfsic_block *b) 452 { 453 list_del(&b->collision_resolving_node); 454 } 455 456 static struct btrfsic_block *btrfsic_block_hashtable_lookup( 457 struct block_device *bdev, 458 u64 dev_bytenr, 459 struct btrfsic_block_hashtable *h) 460 { 461 const unsigned int hashval = 462 (((unsigned int)(dev_bytenr >> 16)) ^ 463 ((unsigned int)((uintptr_t)bdev))) & 464 (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1); 465 struct btrfsic_block *b; 466 467 list_for_each_entry(b, h->table + hashval, collision_resolving_node) { 468 if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr) 469 return b; 470 } 471 472 return NULL; 473 } 474 475 static void btrfsic_block_link_hashtable_init( 476 struct btrfsic_block_link_hashtable *h) 
477 { 478 int i; 479 480 for (i = 0; i < BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE; i++) 481 INIT_LIST_HEAD(h->table + i); 482 } 483 484 static void btrfsic_block_link_hashtable_add( 485 struct btrfsic_block_link *l, 486 struct btrfsic_block_link_hashtable *h) 487 { 488 const unsigned int hashval = 489 (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^ 490 ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^ 491 ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^ 492 ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev))) 493 & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1); 494 495 BUG_ON(NULL == l->block_ref_to); 496 BUG_ON(NULL == l->block_ref_from); 497 list_add(&l->collision_resolving_node, h->table + hashval); 498 } 499 500 static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l) 501 { 502 list_del(&l->collision_resolving_node); 503 } 504 505 static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup( 506 struct block_device *bdev_ref_to, 507 u64 dev_bytenr_ref_to, 508 struct block_device *bdev_ref_from, 509 u64 dev_bytenr_ref_from, 510 struct btrfsic_block_link_hashtable *h) 511 { 512 const unsigned int hashval = 513 (((unsigned int)(dev_bytenr_ref_to >> 16)) ^ 514 ((unsigned int)(dev_bytenr_ref_from >> 16)) ^ 515 ((unsigned int)((uintptr_t)bdev_ref_to)) ^ 516 ((unsigned int)((uintptr_t)bdev_ref_from))) & 517 (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1); 518 struct btrfsic_block_link *l; 519 520 list_for_each_entry(l, h->table + hashval, collision_resolving_node) { 521 BUG_ON(NULL == l->block_ref_to); 522 BUG_ON(NULL == l->block_ref_from); 523 if (l->block_ref_to->dev_state->bdev == bdev_ref_to && 524 l->block_ref_to->dev_bytenr == dev_bytenr_ref_to && 525 l->block_ref_from->dev_state->bdev == bdev_ref_from && 526 l->block_ref_from->dev_bytenr == dev_bytenr_ref_from) 527 return l; 528 } 529 530 return NULL; 531 } 532 533 static void btrfsic_dev_state_hashtable_init( 534 struct btrfsic_dev_state_hashtable *h) 535 { 536 int i; 537 538 for (i = 0; i < BTRFSIC_DEV2STATE_HASHTABLE_SIZE; i++) 539 INIT_LIST_HEAD(h->table + i); 540 } 541 542 static void btrfsic_dev_state_hashtable_add( 543 struct btrfsic_dev_state *ds, 544 struct btrfsic_dev_state_hashtable *h) 545 { 546 const unsigned int hashval = 547 (((unsigned int)((uintptr_t)ds->bdev->bd_dev)) & 548 (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1)); 549 550 list_add(&ds->collision_resolving_node, h->table + hashval); 551 } 552 553 static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds) 554 { 555 list_del(&ds->collision_resolving_node); 556 } 557 558 static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev, 559 struct btrfsic_dev_state_hashtable *h) 560 { 561 const unsigned int hashval = 562 dev & (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1); 563 struct btrfsic_dev_state *ds; 564 565 list_for_each_entry(ds, h->table + hashval, collision_resolving_node) { 566 if (ds->bdev->bd_dev == dev) 567 return ds; 568 } 569 570 return NULL; 571 } 572 573 static int btrfsic_process_superblock(struct btrfsic_state *state, 574 struct btrfs_fs_devices *fs_devices) 575 { 576 struct btrfs_super_block *selected_super; 577 struct list_head *dev_head = &fs_devices->devices; 578 struct btrfs_device *device; 579 struct btrfsic_dev_state *selected_dev_state = NULL; 580 int ret = 0; 581 int pass; 582 583 selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS); 584 if (!selected_super) 585 return -ENOMEM; 586 587 list_for_each_entry(device, dev_head, dev_list) { 588 int i; 589 struct 
btrfsic_dev_state *dev_state; 590 591 if (!device->bdev || !device->name) 592 continue; 593 594 dev_state = btrfsic_dev_state_lookup(device->bdev->bd_dev); 595 BUG_ON(NULL == dev_state); 596 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 597 ret = btrfsic_process_superblock_dev_mirror( 598 state, dev_state, device, i, 599 &selected_dev_state, selected_super); 600 if (0 != ret && 0 == i) { 601 kfree(selected_super); 602 return ret; 603 } 604 } 605 } 606 607 if (NULL == state->latest_superblock) { 608 pr_info("btrfsic: no superblock found!\n"); 609 kfree(selected_super); 610 return -1; 611 } 612 613 for (pass = 0; pass < 3; pass++) { 614 int num_copies; 615 int mirror_num; 616 u64 next_bytenr; 617 618 switch (pass) { 619 case 0: 620 next_bytenr = btrfs_super_root(selected_super); 621 if (state->print_mask & 622 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 623 pr_info("root@%llu\n", next_bytenr); 624 break; 625 case 1: 626 next_bytenr = btrfs_super_chunk_root(selected_super); 627 if (state->print_mask & 628 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 629 pr_info("chunk@%llu\n", next_bytenr); 630 break; 631 case 2: 632 next_bytenr = btrfs_super_log_root(selected_super); 633 if (0 == next_bytenr) 634 continue; 635 if (state->print_mask & 636 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 637 pr_info("log@%llu\n", next_bytenr); 638 break; 639 } 640 641 num_copies = btrfs_num_copies(state->fs_info, next_bytenr, 642 state->metablock_size); 643 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 644 pr_info("num_copies(log_bytenr=%llu) = %d\n", 645 next_bytenr, num_copies); 646 647 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 648 struct btrfsic_block *next_block; 649 struct btrfsic_block_data_ctx tmp_next_block_ctx; 650 struct btrfsic_block_link *l; 651 652 ret = btrfsic_map_block(state, next_bytenr, 653 state->metablock_size, 654 &tmp_next_block_ctx, 655 mirror_num); 656 if (ret) { 657 pr_info("btrfsic: btrfsic_map_block(root @%llu, mirror %d) failed!\n", 658 next_bytenr, mirror_num); 659 kfree(selected_super); 660 return -1; 661 } 662 663 next_block = btrfsic_block_hashtable_lookup( 664 tmp_next_block_ctx.dev->bdev, 665 tmp_next_block_ctx.dev_bytenr, 666 &state->block_hashtable); 667 BUG_ON(NULL == next_block); 668 669 l = btrfsic_block_link_hashtable_lookup( 670 tmp_next_block_ctx.dev->bdev, 671 tmp_next_block_ctx.dev_bytenr, 672 state->latest_superblock->dev_state-> 673 bdev, 674 state->latest_superblock->dev_bytenr, 675 &state->block_link_hashtable); 676 BUG_ON(NULL == l); 677 678 ret = btrfsic_read_block(state, &tmp_next_block_ctx); 679 if (ret < (int)PAGE_SIZE) { 680 pr_info("btrfsic: read @logical %llu failed!\n", 681 tmp_next_block_ctx.start); 682 btrfsic_release_block_ctx(&tmp_next_block_ctx); 683 kfree(selected_super); 684 return -1; 685 } 686 687 ret = btrfsic_process_metablock(state, 688 next_block, 689 &tmp_next_block_ctx, 690 BTRFS_MAX_LEVEL + 3, 1); 691 btrfsic_release_block_ctx(&tmp_next_block_ctx); 692 } 693 } 694 695 kfree(selected_super); 696 return ret; 697 } 698 699 static int btrfsic_process_superblock_dev_mirror( 700 struct btrfsic_state *state, 701 struct btrfsic_dev_state *dev_state, 702 struct btrfs_device *device, 703 int superblock_mirror_num, 704 struct btrfsic_dev_state **selected_dev_state, 705 struct btrfs_super_block *selected_super) 706 { 707 struct btrfs_fs_info *fs_info = state->fs_info; 708 struct btrfs_super_block *super_tmp; 709 u64 dev_bytenr; 710 struct btrfsic_block *superblock_tmp; 711 int pass; 712 struct block_device 
*const superblock_bdev = device->bdev; 713 struct page *page; 714 struct address_space *mapping = superblock_bdev->bd_inode->i_mapping; 715 int ret = 0; 716 717 /* super block bytenr is always the unmapped device bytenr */ 718 dev_bytenr = btrfs_sb_offset(superblock_mirror_num); 719 if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->commit_total_bytes) 720 return -1; 721 722 page = read_cache_page_gfp(mapping, dev_bytenr >> PAGE_SHIFT, GFP_NOFS); 723 if (IS_ERR(page)) 724 return -1; 725 726 super_tmp = page_address(page); 727 728 if (btrfs_super_bytenr(super_tmp) != dev_bytenr || 729 btrfs_super_magic(super_tmp) != BTRFS_MAGIC || 730 memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) || 731 btrfs_super_nodesize(super_tmp) != state->metablock_size || 732 btrfs_super_sectorsize(super_tmp) != state->datablock_size) { 733 ret = 0; 734 goto out; 735 } 736 737 superblock_tmp = 738 btrfsic_block_hashtable_lookup(superblock_bdev, 739 dev_bytenr, 740 &state->block_hashtable); 741 if (NULL == superblock_tmp) { 742 superblock_tmp = btrfsic_block_alloc(); 743 if (NULL == superblock_tmp) { 744 ret = -1; 745 goto out; 746 } 747 /* for superblock, only the dev_bytenr makes sense */ 748 superblock_tmp->dev_bytenr = dev_bytenr; 749 superblock_tmp->dev_state = dev_state; 750 superblock_tmp->logical_bytenr = dev_bytenr; 751 superblock_tmp->generation = btrfs_super_generation(super_tmp); 752 superblock_tmp->is_metadata = 1; 753 superblock_tmp->is_superblock = 1; 754 superblock_tmp->is_iodone = 1; 755 superblock_tmp->never_written = 0; 756 superblock_tmp->mirror_num = 1 + superblock_mirror_num; 757 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 758 btrfs_info_in_rcu(fs_info, 759 "new initial S-block (bdev %p, %s) @%llu (%s/%llu/%d)", 760 superblock_bdev, 761 rcu_str_deref(device->name), dev_bytenr, 762 dev_state->name, dev_bytenr, 763 superblock_mirror_num); 764 list_add(&superblock_tmp->all_blocks_node, 765 &state->all_blocks_list); 766 btrfsic_block_hashtable_add(superblock_tmp, 767 &state->block_hashtable); 768 } 769 770 /* select the one with the highest generation field */ 771 if (btrfs_super_generation(super_tmp) > 772 state->max_superblock_generation || 773 0 == state->max_superblock_generation) { 774 memcpy(selected_super, super_tmp, sizeof(*selected_super)); 775 *selected_dev_state = dev_state; 776 state->max_superblock_generation = 777 btrfs_super_generation(super_tmp); 778 state->latest_superblock = superblock_tmp; 779 } 780 781 for (pass = 0; pass < 3; pass++) { 782 u64 next_bytenr; 783 int num_copies; 784 int mirror_num; 785 const char *additional_string = NULL; 786 struct btrfs_disk_key tmp_disk_key; 787 788 tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY; 789 tmp_disk_key.offset = 0; 790 switch (pass) { 791 case 0: 792 btrfs_set_disk_key_objectid(&tmp_disk_key, 793 BTRFS_ROOT_TREE_OBJECTID); 794 additional_string = "initial root "; 795 next_bytenr = btrfs_super_root(super_tmp); 796 break; 797 case 1: 798 btrfs_set_disk_key_objectid(&tmp_disk_key, 799 BTRFS_CHUNK_TREE_OBJECTID); 800 additional_string = "initial chunk "; 801 next_bytenr = btrfs_super_chunk_root(super_tmp); 802 break; 803 case 2: 804 btrfs_set_disk_key_objectid(&tmp_disk_key, 805 BTRFS_TREE_LOG_OBJECTID); 806 additional_string = "initial log "; 807 next_bytenr = btrfs_super_log_root(super_tmp); 808 if (0 == next_bytenr) 809 continue; 810 break; 811 } 812 813 num_copies = btrfs_num_copies(fs_info, next_bytenr, 814 state->metablock_size); 815 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 816 
pr_info("num_copies(log_bytenr=%llu) = %d\n", 817 next_bytenr, num_copies); 818 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 819 struct btrfsic_block *next_block; 820 struct btrfsic_block_data_ctx tmp_next_block_ctx; 821 struct btrfsic_block_link *l; 822 823 if (btrfsic_map_block(state, next_bytenr, 824 state->metablock_size, 825 &tmp_next_block_ctx, 826 mirror_num)) { 827 pr_info("btrfsic: btrfsic_map_block(bytenr @%llu, mirror %d) failed!\n", 828 next_bytenr, mirror_num); 829 ret = -1; 830 goto out; 831 } 832 833 next_block = btrfsic_block_lookup_or_add( 834 state, &tmp_next_block_ctx, 835 additional_string, 1, 1, 0, 836 mirror_num, NULL); 837 if (NULL == next_block) { 838 btrfsic_release_block_ctx(&tmp_next_block_ctx); 839 ret = -1; 840 goto out; 841 } 842 843 next_block->disk_key = tmp_disk_key; 844 next_block->generation = BTRFSIC_GENERATION_UNKNOWN; 845 l = btrfsic_block_link_lookup_or_add( 846 state, &tmp_next_block_ctx, 847 next_block, superblock_tmp, 848 BTRFSIC_GENERATION_UNKNOWN); 849 btrfsic_release_block_ctx(&tmp_next_block_ctx); 850 if (NULL == l) { 851 ret = -1; 852 goto out; 853 } 854 } 855 } 856 if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES) 857 btrfsic_dump_tree_sub(state, superblock_tmp, 0); 858 859 out: 860 put_page(page); 861 return ret; 862 } 863 864 static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void) 865 { 866 struct btrfsic_stack_frame *sf; 867 868 sf = kzalloc(sizeof(*sf), GFP_NOFS); 869 if (sf) 870 sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER; 871 return sf; 872 } 873 874 static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf) 875 { 876 BUG_ON(!(NULL == sf || 877 BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER == sf->magic)); 878 kfree(sf); 879 } 880 881 static noinline_for_stack int btrfsic_process_metablock( 882 struct btrfsic_state *state, 883 struct btrfsic_block *const first_block, 884 struct btrfsic_block_data_ctx *const first_block_ctx, 885 int first_limit_nesting, int force_iodone_flag) 886 { 887 struct btrfsic_stack_frame initial_stack_frame = { 0 }; 888 struct btrfsic_stack_frame *sf; 889 struct btrfsic_stack_frame *next_stack; 890 struct btrfs_header *const first_hdr = 891 (struct btrfs_header *)first_block_ctx->datav[0]; 892 893 BUG_ON(!first_hdr); 894 sf = &initial_stack_frame; 895 sf->error = 0; 896 sf->i = -1; 897 sf->limit_nesting = first_limit_nesting; 898 sf->block = first_block; 899 sf->block_ctx = first_block_ctx; 900 sf->next_block = NULL; 901 sf->hdr = first_hdr; 902 sf->prev = NULL; 903 904 continue_with_new_stack_frame: 905 sf->block->generation = btrfs_stack_header_generation(sf->hdr); 906 if (0 == sf->hdr->level) { 907 struct btrfs_leaf *const leafhdr = 908 (struct btrfs_leaf *)sf->hdr; 909 910 if (-1 == sf->i) { 911 sf->nr = btrfs_stack_header_nritems(&leafhdr->header); 912 913 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 914 pr_info("leaf %llu items %d generation %llu owner %llu\n", 915 sf->block_ctx->start, sf->nr, 916 btrfs_stack_header_generation( 917 &leafhdr->header), 918 btrfs_stack_header_owner( 919 &leafhdr->header)); 920 } 921 922 continue_with_current_leaf_stack_frame: 923 if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) { 924 sf->i++; 925 sf->num_copies = 0; 926 } 927 928 if (sf->i < sf->nr) { 929 struct btrfs_item disk_item; 930 u32 disk_item_offset = 931 (uintptr_t)(leafhdr->items + sf->i) - 932 (uintptr_t)leafhdr; 933 struct btrfs_disk_key *disk_key; 934 u8 type; 935 u32 item_offset; 936 u32 item_size; 937 938 if (disk_item_offset + sizeof(struct 
btrfs_item) > 939 sf->block_ctx->len) { 940 leaf_item_out_of_bounce_error: 941 pr_info("btrfsic: leaf item out of bounce at logical %llu, dev %s\n", 942 sf->block_ctx->start, 943 sf->block_ctx->dev->name); 944 goto one_stack_frame_backwards; 945 } 946 btrfsic_read_from_block_data(sf->block_ctx, 947 &disk_item, 948 disk_item_offset, 949 sizeof(struct btrfs_item)); 950 item_offset = btrfs_stack_item_offset(&disk_item); 951 item_size = btrfs_stack_item_size(&disk_item); 952 disk_key = &disk_item.key; 953 type = btrfs_disk_key_type(disk_key); 954 955 if (BTRFS_ROOT_ITEM_KEY == type) { 956 struct btrfs_root_item root_item; 957 u32 root_item_offset; 958 u64 next_bytenr; 959 960 root_item_offset = item_offset + 961 offsetof(struct btrfs_leaf, items); 962 if (root_item_offset + item_size > 963 sf->block_ctx->len) 964 goto leaf_item_out_of_bounce_error; 965 btrfsic_read_from_block_data( 966 sf->block_ctx, &root_item, 967 root_item_offset, 968 item_size); 969 next_bytenr = btrfs_root_bytenr(&root_item); 970 971 sf->error = 972 btrfsic_create_link_to_next_block( 973 state, 974 sf->block, 975 sf->block_ctx, 976 next_bytenr, 977 sf->limit_nesting, 978 &sf->next_block_ctx, 979 &sf->next_block, 980 force_iodone_flag, 981 &sf->num_copies, 982 &sf->mirror_num, 983 disk_key, 984 btrfs_root_generation( 985 &root_item)); 986 if (sf->error) 987 goto one_stack_frame_backwards; 988 989 if (NULL != sf->next_block) { 990 struct btrfs_header *const next_hdr = 991 (struct btrfs_header *) 992 sf->next_block_ctx.datav[0]; 993 994 next_stack = 995 btrfsic_stack_frame_alloc(); 996 if (NULL == next_stack) { 997 sf->error = -1; 998 btrfsic_release_block_ctx( 999 &sf-> 1000 next_block_ctx); 1001 goto one_stack_frame_backwards; 1002 } 1003 1004 next_stack->i = -1; 1005 next_stack->block = sf->next_block; 1006 next_stack->block_ctx = 1007 &sf->next_block_ctx; 1008 next_stack->next_block = NULL; 1009 next_stack->hdr = next_hdr; 1010 next_stack->limit_nesting = 1011 sf->limit_nesting - 1; 1012 next_stack->prev = sf; 1013 sf = next_stack; 1014 goto continue_with_new_stack_frame; 1015 } 1016 } else if (BTRFS_EXTENT_DATA_KEY == type && 1017 state->include_extent_data) { 1018 sf->error = btrfsic_handle_extent_data( 1019 state, 1020 sf->block, 1021 sf->block_ctx, 1022 item_offset, 1023 force_iodone_flag); 1024 if (sf->error) 1025 goto one_stack_frame_backwards; 1026 } 1027 1028 goto continue_with_current_leaf_stack_frame; 1029 } 1030 } else { 1031 struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr; 1032 1033 if (-1 == sf->i) { 1034 sf->nr = btrfs_stack_header_nritems(&nodehdr->header); 1035 1036 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1037 pr_info("node %llu level %d items %d generation %llu owner %llu\n", 1038 sf->block_ctx->start, 1039 nodehdr->header.level, sf->nr, 1040 btrfs_stack_header_generation( 1041 &nodehdr->header), 1042 btrfs_stack_header_owner( 1043 &nodehdr->header)); 1044 } 1045 1046 continue_with_current_node_stack_frame: 1047 if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) { 1048 sf->i++; 1049 sf->num_copies = 0; 1050 } 1051 1052 if (sf->i < sf->nr) { 1053 struct btrfs_key_ptr key_ptr; 1054 u32 key_ptr_offset; 1055 u64 next_bytenr; 1056 1057 key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) - 1058 (uintptr_t)nodehdr; 1059 if (key_ptr_offset + sizeof(struct btrfs_key_ptr) > 1060 sf->block_ctx->len) { 1061 pr_info("btrfsic: node item out of bounce at logical %llu, dev %s\n", 1062 sf->block_ctx->start, 1063 sf->block_ctx->dev->name); 1064 goto one_stack_frame_backwards; 1065 } 
1066 btrfsic_read_from_block_data( 1067 sf->block_ctx, &key_ptr, key_ptr_offset, 1068 sizeof(struct btrfs_key_ptr)); 1069 next_bytenr = btrfs_stack_key_blockptr(&key_ptr); 1070 1071 sf->error = btrfsic_create_link_to_next_block( 1072 state, 1073 sf->block, 1074 sf->block_ctx, 1075 next_bytenr, 1076 sf->limit_nesting, 1077 &sf->next_block_ctx, 1078 &sf->next_block, 1079 force_iodone_flag, 1080 &sf->num_copies, 1081 &sf->mirror_num, 1082 &key_ptr.key, 1083 btrfs_stack_key_generation(&key_ptr)); 1084 if (sf->error) 1085 goto one_stack_frame_backwards; 1086 1087 if (NULL != sf->next_block) { 1088 struct btrfs_header *const next_hdr = 1089 (struct btrfs_header *) 1090 sf->next_block_ctx.datav[0]; 1091 1092 next_stack = btrfsic_stack_frame_alloc(); 1093 if (NULL == next_stack) { 1094 sf->error = -1; 1095 goto one_stack_frame_backwards; 1096 } 1097 1098 next_stack->i = -1; 1099 next_stack->block = sf->next_block; 1100 next_stack->block_ctx = &sf->next_block_ctx; 1101 next_stack->next_block = NULL; 1102 next_stack->hdr = next_hdr; 1103 next_stack->limit_nesting = 1104 sf->limit_nesting - 1; 1105 next_stack->prev = sf; 1106 sf = next_stack; 1107 goto continue_with_new_stack_frame; 1108 } 1109 1110 goto continue_with_current_node_stack_frame; 1111 } 1112 } 1113 1114 one_stack_frame_backwards: 1115 if (NULL != sf->prev) { 1116 struct btrfsic_stack_frame *const prev = sf->prev; 1117 1118 /* the one for the initial block is freed in the caller */ 1119 btrfsic_release_block_ctx(sf->block_ctx); 1120 1121 if (sf->error) { 1122 prev->error = sf->error; 1123 btrfsic_stack_frame_free(sf); 1124 sf = prev; 1125 goto one_stack_frame_backwards; 1126 } 1127 1128 btrfsic_stack_frame_free(sf); 1129 sf = prev; 1130 goto continue_with_new_stack_frame; 1131 } else { 1132 BUG_ON(&initial_stack_frame != sf); 1133 } 1134 1135 return sf->error; 1136 } 1137 1138 static void btrfsic_read_from_block_data( 1139 struct btrfsic_block_data_ctx *block_ctx, 1140 void *dstv, u32 offset, size_t len) 1141 { 1142 size_t cur; 1143 size_t pgoff; 1144 char *kaddr; 1145 char *dst = (char *)dstv; 1146 size_t start_offset = offset_in_page(block_ctx->start); 1147 unsigned long i = (start_offset + offset) >> PAGE_SHIFT; 1148 1149 WARN_ON(offset + len > block_ctx->len); 1150 pgoff = offset_in_page(start_offset + offset); 1151 1152 while (len > 0) { 1153 cur = min(len, ((size_t)PAGE_SIZE - pgoff)); 1154 BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE)); 1155 kaddr = block_ctx->datav[i]; 1156 memcpy(dst, kaddr + pgoff, cur); 1157 1158 dst += cur; 1159 len -= cur; 1160 pgoff = 0; 1161 i++; 1162 } 1163 } 1164 1165 static int btrfsic_create_link_to_next_block( 1166 struct btrfsic_state *state, 1167 struct btrfsic_block *block, 1168 struct btrfsic_block_data_ctx *block_ctx, 1169 u64 next_bytenr, 1170 int limit_nesting, 1171 struct btrfsic_block_data_ctx *next_block_ctx, 1172 struct btrfsic_block **next_blockp, 1173 int force_iodone_flag, 1174 int *num_copiesp, int *mirror_nump, 1175 struct btrfs_disk_key *disk_key, 1176 u64 parent_generation) 1177 { 1178 struct btrfs_fs_info *fs_info = state->fs_info; 1179 struct btrfsic_block *next_block = NULL; 1180 int ret; 1181 struct btrfsic_block_link *l; 1182 int did_alloc_block_link; 1183 int block_was_created; 1184 1185 *next_blockp = NULL; 1186 if (0 == *num_copiesp) { 1187 *num_copiesp = btrfs_num_copies(fs_info, next_bytenr, 1188 state->metablock_size); 1189 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 1190 pr_info("num_copies(log_bytenr=%llu) = %d\n", 1191 next_bytenr, *num_copiesp); 1192 
*mirror_nump = 1; 1193 } 1194 1195 if (*mirror_nump > *num_copiesp) 1196 return 0; 1197 1198 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1199 pr_info("btrfsic_create_link_to_next_block(mirror_num=%d)\n", 1200 *mirror_nump); 1201 ret = btrfsic_map_block(state, next_bytenr, 1202 state->metablock_size, 1203 next_block_ctx, *mirror_nump); 1204 if (ret) { 1205 pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n", 1206 next_bytenr, *mirror_nump); 1207 btrfsic_release_block_ctx(next_block_ctx); 1208 *next_blockp = NULL; 1209 return -1; 1210 } 1211 1212 next_block = btrfsic_block_lookup_or_add(state, 1213 next_block_ctx, "referenced ", 1214 1, force_iodone_flag, 1215 !force_iodone_flag, 1216 *mirror_nump, 1217 &block_was_created); 1218 if (NULL == next_block) { 1219 btrfsic_release_block_ctx(next_block_ctx); 1220 *next_blockp = NULL; 1221 return -1; 1222 } 1223 if (block_was_created) { 1224 l = NULL; 1225 next_block->generation = BTRFSIC_GENERATION_UNKNOWN; 1226 } else { 1227 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) { 1228 if (next_block->logical_bytenr != next_bytenr && 1229 !(!next_block->is_metadata && 1230 0 == next_block->logical_bytenr)) 1231 pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n", 1232 next_bytenr, next_block_ctx->dev->name, 1233 next_block_ctx->dev_bytenr, *mirror_nump, 1234 btrfsic_get_block_type(state, 1235 next_block), 1236 next_block->logical_bytenr); 1237 else 1238 pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, %c.\n", 1239 next_bytenr, next_block_ctx->dev->name, 1240 next_block_ctx->dev_bytenr, *mirror_nump, 1241 btrfsic_get_block_type(state, 1242 next_block)); 1243 } 1244 next_block->logical_bytenr = next_bytenr; 1245 1246 next_block->mirror_num = *mirror_nump; 1247 l = btrfsic_block_link_hashtable_lookup( 1248 next_block_ctx->dev->bdev, 1249 next_block_ctx->dev_bytenr, 1250 block_ctx->dev->bdev, 1251 block_ctx->dev_bytenr, 1252 &state->block_link_hashtable); 1253 } 1254 1255 next_block->disk_key = *disk_key; 1256 if (NULL == l) { 1257 l = btrfsic_block_link_alloc(); 1258 if (NULL == l) { 1259 btrfsic_release_block_ctx(next_block_ctx); 1260 *next_blockp = NULL; 1261 return -1; 1262 } 1263 1264 did_alloc_block_link = 1; 1265 l->block_ref_to = next_block; 1266 l->block_ref_from = block; 1267 l->ref_cnt = 1; 1268 l->parent_generation = parent_generation; 1269 1270 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1271 btrfsic_print_add_link(state, l); 1272 1273 list_add(&l->node_ref_to, &block->ref_to_list); 1274 list_add(&l->node_ref_from, &next_block->ref_from_list); 1275 1276 btrfsic_block_link_hashtable_add(l, 1277 &state->block_link_hashtable); 1278 } else { 1279 did_alloc_block_link = 0; 1280 if (0 == limit_nesting) { 1281 l->ref_cnt++; 1282 l->parent_generation = parent_generation; 1283 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1284 btrfsic_print_add_link(state, l); 1285 } 1286 } 1287 1288 if (limit_nesting > 0 && did_alloc_block_link) { 1289 ret = btrfsic_read_block(state, next_block_ctx); 1290 if (ret < (int)next_block_ctx->len) { 1291 pr_info("btrfsic: read block @logical %llu failed!\n", 1292 next_bytenr); 1293 btrfsic_release_block_ctx(next_block_ctx); 1294 *next_blockp = NULL; 1295 return -1; 1296 } 1297 1298 *next_blockp = next_block; 1299 } else { 1300 *next_blockp = NULL; 1301 } 1302 (*mirror_nump)++; 1303 1304 return 0; 1305 } 1306 1307 static int btrfsic_handle_extent_data( 1308 struct btrfsic_state *state, 1309 struct btrfsic_block *block, 
1310 struct btrfsic_block_data_ctx *block_ctx, 1311 u32 item_offset, int force_iodone_flag) 1312 { 1313 struct btrfs_fs_info *fs_info = state->fs_info; 1314 struct btrfs_file_extent_item file_extent_item; 1315 u64 file_extent_item_offset; 1316 u64 next_bytenr; 1317 u64 num_bytes; 1318 u64 generation; 1319 struct btrfsic_block_link *l; 1320 int ret; 1321 1322 file_extent_item_offset = offsetof(struct btrfs_leaf, items) + 1323 item_offset; 1324 if (file_extent_item_offset + 1325 offsetof(struct btrfs_file_extent_item, disk_num_bytes) > 1326 block_ctx->len) { 1327 pr_info("btrfsic: file item out of bounce at logical %llu, dev %s\n", 1328 block_ctx->start, block_ctx->dev->name); 1329 return -1; 1330 } 1331 1332 btrfsic_read_from_block_data(block_ctx, &file_extent_item, 1333 file_extent_item_offset, 1334 offsetof(struct btrfs_file_extent_item, disk_num_bytes)); 1335 if (BTRFS_FILE_EXTENT_REG != file_extent_item.type || 1336 btrfs_stack_file_extent_disk_bytenr(&file_extent_item) == 0) { 1337 if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE) 1338 pr_info("extent_data: type %u, disk_bytenr = %llu\n", 1339 file_extent_item.type, 1340 btrfs_stack_file_extent_disk_bytenr( 1341 &file_extent_item)); 1342 return 0; 1343 } 1344 1345 if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) > 1346 block_ctx->len) { 1347 pr_info("btrfsic: file item out of bounce at logical %llu, dev %s\n", 1348 block_ctx->start, block_ctx->dev->name); 1349 return -1; 1350 } 1351 btrfsic_read_from_block_data(block_ctx, &file_extent_item, 1352 file_extent_item_offset, 1353 sizeof(struct btrfs_file_extent_item)); 1354 next_bytenr = btrfs_stack_file_extent_disk_bytenr(&file_extent_item); 1355 if (btrfs_stack_file_extent_compression(&file_extent_item) == 1356 BTRFS_COMPRESS_NONE) { 1357 next_bytenr += btrfs_stack_file_extent_offset(&file_extent_item); 1358 num_bytes = btrfs_stack_file_extent_num_bytes(&file_extent_item); 1359 } else { 1360 num_bytes = btrfs_stack_file_extent_disk_num_bytes(&file_extent_item); 1361 } 1362 generation = btrfs_stack_file_extent_generation(&file_extent_item); 1363 1364 if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE) 1365 pr_info("extent_data: type %u, disk_bytenr = %llu, offset = %llu, num_bytes = %llu\n", 1366 file_extent_item.type, 1367 btrfs_stack_file_extent_disk_bytenr(&file_extent_item), 1368 btrfs_stack_file_extent_offset(&file_extent_item), 1369 num_bytes); 1370 while (num_bytes > 0) { 1371 u32 chunk_len; 1372 int num_copies; 1373 int mirror_num; 1374 1375 if (num_bytes > state->datablock_size) 1376 chunk_len = state->datablock_size; 1377 else 1378 chunk_len = num_bytes; 1379 1380 num_copies = btrfs_num_copies(fs_info, next_bytenr, 1381 state->datablock_size); 1382 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 1383 pr_info("num_copies(log_bytenr=%llu) = %d\n", 1384 next_bytenr, num_copies); 1385 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 1386 struct btrfsic_block_data_ctx next_block_ctx; 1387 struct btrfsic_block *next_block; 1388 int block_was_created; 1389 1390 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1391 pr_info("btrfsic_handle_extent_data(mirror_num=%d)\n", 1392 mirror_num); 1393 if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE) 1394 pr_info("\tdisk_bytenr = %llu, num_bytes %u\n", 1395 next_bytenr, chunk_len); 1396 ret = btrfsic_map_block(state, next_bytenr, 1397 chunk_len, &next_block_ctx, 1398 mirror_num); 1399 if (ret) { 1400 pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n", 1401 next_bytenr, 
mirror_num); 1402 return -1; 1403 } 1404 1405 next_block = btrfsic_block_lookup_or_add( 1406 state, 1407 &next_block_ctx, 1408 "referenced ", 1409 0, 1410 force_iodone_flag, 1411 !force_iodone_flag, 1412 mirror_num, 1413 &block_was_created); 1414 if (NULL == next_block) { 1415 btrfsic_release_block_ctx(&next_block_ctx); 1416 return -1; 1417 } 1418 if (!block_was_created) { 1419 if ((state->print_mask & 1420 BTRFSIC_PRINT_MASK_VERBOSE) && 1421 next_block->logical_bytenr != next_bytenr && 1422 !(!next_block->is_metadata && 1423 0 == next_block->logical_bytenr)) { 1424 pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, D, bytenr mismatch (!= stored %llu).\n", 1425 next_bytenr, 1426 next_block_ctx.dev->name, 1427 next_block_ctx.dev_bytenr, 1428 mirror_num, 1429 next_block->logical_bytenr); 1430 } 1431 next_block->logical_bytenr = next_bytenr; 1432 next_block->mirror_num = mirror_num; 1433 } 1434 1435 l = btrfsic_block_link_lookup_or_add(state, 1436 &next_block_ctx, 1437 next_block, block, 1438 generation); 1439 btrfsic_release_block_ctx(&next_block_ctx); 1440 if (NULL == l) 1441 return -1; 1442 } 1443 1444 next_bytenr += chunk_len; 1445 num_bytes -= chunk_len; 1446 } 1447 1448 return 0; 1449 } 1450 1451 static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len, 1452 struct btrfsic_block_data_ctx *block_ctx_out, 1453 int mirror_num) 1454 { 1455 struct btrfs_fs_info *fs_info = state->fs_info; 1456 int ret; 1457 u64 length; 1458 struct btrfs_bio *multi = NULL; 1459 struct btrfs_device *device; 1460 1461 length = len; 1462 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, 1463 bytenr, &length, &multi, mirror_num); 1464 1465 if (ret) { 1466 block_ctx_out->start = 0; 1467 block_ctx_out->dev_bytenr = 0; 1468 block_ctx_out->len = 0; 1469 block_ctx_out->dev = NULL; 1470 block_ctx_out->datav = NULL; 1471 block_ctx_out->pagev = NULL; 1472 block_ctx_out->mem_to_free = NULL; 1473 1474 return ret; 1475 } 1476 1477 device = multi->stripes[0].dev; 1478 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) || 1479 !device->bdev || !device->name) 1480 block_ctx_out->dev = NULL; 1481 else 1482 block_ctx_out->dev = btrfsic_dev_state_lookup( 1483 device->bdev->bd_dev); 1484 block_ctx_out->dev_bytenr = multi->stripes[0].physical; 1485 block_ctx_out->start = bytenr; 1486 block_ctx_out->len = len; 1487 block_ctx_out->datav = NULL; 1488 block_ctx_out->pagev = NULL; 1489 block_ctx_out->mem_to_free = NULL; 1490 1491 kfree(multi); 1492 if (NULL == block_ctx_out->dev) { 1493 ret = -ENXIO; 1494 pr_info("btrfsic: error, cannot lookup dev (#1)!\n"); 1495 } 1496 1497 return ret; 1498 } 1499 1500 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx) 1501 { 1502 if (block_ctx->mem_to_free) { 1503 unsigned int num_pages; 1504 1505 BUG_ON(!block_ctx->datav); 1506 BUG_ON(!block_ctx->pagev); 1507 num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >> 1508 PAGE_SHIFT; 1509 /* Pages must be unmapped in reverse order */ 1510 while (num_pages > 0) { 1511 num_pages--; 1512 if (block_ctx->datav[num_pages]) 1513 block_ctx->datav[num_pages] = NULL; 1514 if (block_ctx->pagev[num_pages]) { 1515 __free_page(block_ctx->pagev[num_pages]); 1516 block_ctx->pagev[num_pages] = NULL; 1517 } 1518 } 1519 1520 kfree(block_ctx->mem_to_free); 1521 block_ctx->mem_to_free = NULL; 1522 block_ctx->pagev = NULL; 1523 block_ctx->datav = NULL; 1524 } 1525 } 1526 1527 static int btrfsic_read_block(struct btrfsic_state *state, 1528 struct btrfsic_block_data_ctx *block_ctx) 1529 { 1530 unsigned int 
num_pages; 1531 unsigned int i; 1532 size_t size; 1533 u64 dev_bytenr; 1534 int ret; 1535 1536 BUG_ON(block_ctx->datav); 1537 BUG_ON(block_ctx->pagev); 1538 BUG_ON(block_ctx->mem_to_free); 1539 if (!PAGE_ALIGNED(block_ctx->dev_bytenr)) { 1540 pr_info("btrfsic: read_block() with unaligned bytenr %llu\n", 1541 block_ctx->dev_bytenr); 1542 return -1; 1543 } 1544 1545 num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >> 1546 PAGE_SHIFT; 1547 size = sizeof(*block_ctx->datav) + sizeof(*block_ctx->pagev); 1548 block_ctx->mem_to_free = kcalloc(num_pages, size, GFP_NOFS); 1549 if (!block_ctx->mem_to_free) 1550 return -ENOMEM; 1551 block_ctx->datav = block_ctx->mem_to_free; 1552 block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages); 1553 for (i = 0; i < num_pages; i++) { 1554 block_ctx->pagev[i] = alloc_page(GFP_NOFS); 1555 if (!block_ctx->pagev[i]) 1556 return -1; 1557 } 1558 1559 dev_bytenr = block_ctx->dev_bytenr; 1560 for (i = 0; i < num_pages;) { 1561 struct bio *bio; 1562 unsigned int j; 1563 1564 bio = btrfs_io_bio_alloc(num_pages - i); 1565 bio_set_dev(bio, block_ctx->dev->bdev); 1566 bio->bi_iter.bi_sector = dev_bytenr >> 9; 1567 bio->bi_opf = REQ_OP_READ; 1568 1569 for (j = i; j < num_pages; j++) { 1570 ret = bio_add_page(bio, block_ctx->pagev[j], 1571 PAGE_SIZE, 0); 1572 if (PAGE_SIZE != ret) 1573 break; 1574 } 1575 if (j == i) { 1576 pr_info("btrfsic: error, failed to add a single page!\n"); 1577 return -1; 1578 } 1579 if (submit_bio_wait(bio)) { 1580 pr_info("btrfsic: read error at logical %llu dev %s!\n", 1581 block_ctx->start, block_ctx->dev->name); 1582 bio_put(bio); 1583 return -1; 1584 } 1585 bio_put(bio); 1586 dev_bytenr += (j - i) * PAGE_SIZE; 1587 i = j; 1588 } 1589 for (i = 0; i < num_pages; i++) 1590 block_ctx->datav[i] = page_address(block_ctx->pagev[i]); 1591 1592 return block_ctx->len; 1593 } 1594 1595 static void btrfsic_dump_database(struct btrfsic_state *state) 1596 { 1597 const struct btrfsic_block *b_all; 1598 1599 BUG_ON(NULL == state); 1600 1601 pr_info("all_blocks_list:\n"); 1602 list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) { 1603 const struct btrfsic_block_link *l; 1604 1605 pr_info("%c-block @%llu (%s/%llu/%d)\n", 1606 btrfsic_get_block_type(state, b_all), 1607 b_all->logical_bytenr, b_all->dev_state->name, 1608 b_all->dev_bytenr, b_all->mirror_num); 1609 1610 list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) { 1611 pr_info(" %c @%llu (%s/%llu/%d) refers %u* to %c @%llu (%s/%llu/%d)\n", 1612 btrfsic_get_block_type(state, b_all), 1613 b_all->logical_bytenr, b_all->dev_state->name, 1614 b_all->dev_bytenr, b_all->mirror_num, 1615 l->ref_cnt, 1616 btrfsic_get_block_type(state, l->block_ref_to), 1617 l->block_ref_to->logical_bytenr, 1618 l->block_ref_to->dev_state->name, 1619 l->block_ref_to->dev_bytenr, 1620 l->block_ref_to->mirror_num); 1621 } 1622 1623 list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) { 1624 pr_info(" %c @%llu (%s/%llu/%d) is ref %u* from %c @%llu (%s/%llu/%d)\n", 1625 btrfsic_get_block_type(state, b_all), 1626 b_all->logical_bytenr, b_all->dev_state->name, 1627 b_all->dev_bytenr, b_all->mirror_num, 1628 l->ref_cnt, 1629 btrfsic_get_block_type(state, l->block_ref_from), 1630 l->block_ref_from->logical_bytenr, 1631 l->block_ref_from->dev_state->name, 1632 l->block_ref_from->dev_bytenr, 1633 l->block_ref_from->mirror_num); 1634 } 1635 1636 pr_info("\n"); 1637 } 1638 } 1639 1640 /* 1641 * Test whether the disk block contains a tree block (leaf or node) 1642 * (note that this test fails for the super 
block) 1643 */ 1644 static noinline_for_stack int btrfsic_test_for_metadata( 1645 struct btrfsic_state *state, 1646 char **datav, unsigned int num_pages) 1647 { 1648 struct btrfs_fs_info *fs_info = state->fs_info; 1649 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 1650 struct btrfs_header *h; 1651 u8 csum[BTRFS_CSUM_SIZE]; 1652 unsigned int i; 1653 1654 if (num_pages * PAGE_SIZE < state->metablock_size) 1655 return 1; /* not metadata */ 1656 num_pages = state->metablock_size >> PAGE_SHIFT; 1657 h = (struct btrfs_header *)datav[0]; 1658 1659 if (memcmp(h->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE)) 1660 return 1; 1661 1662 shash->tfm = fs_info->csum_shash; 1663 crypto_shash_init(shash); 1664 1665 for (i = 0; i < num_pages; i++) { 1666 u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE); 1667 size_t sublen = i ? PAGE_SIZE : 1668 (PAGE_SIZE - BTRFS_CSUM_SIZE); 1669 1670 crypto_shash_update(shash, data, sublen); 1671 } 1672 crypto_shash_final(shash, csum); 1673 if (memcmp(csum, h->csum, fs_info->csum_size)) 1674 return 1; 1675 1676 return 0; /* is metadata */ 1677 } 1678 1679 static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, 1680 u64 dev_bytenr, char **mapped_datav, 1681 unsigned int num_pages, 1682 struct bio *bio, int *bio_is_patched, 1683 int submit_bio_bh_rw) 1684 { 1685 int is_metadata; 1686 struct btrfsic_block *block; 1687 struct btrfsic_block_data_ctx block_ctx; 1688 int ret; 1689 struct btrfsic_state *state = dev_state->state; 1690 struct block_device *bdev = dev_state->bdev; 1691 unsigned int processed_len; 1692 1693 if (NULL != bio_is_patched) 1694 *bio_is_patched = 0; 1695 1696 again: 1697 if (num_pages == 0) 1698 return; 1699 1700 processed_len = 0; 1701 is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav, 1702 num_pages)); 1703 1704 block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr, 1705 &state->block_hashtable); 1706 if (NULL != block) { 1707 u64 bytenr = 0; 1708 struct btrfsic_block_link *l, *tmp; 1709 1710 if (block->is_superblock) { 1711 bytenr = btrfs_super_bytenr((struct btrfs_super_block *) 1712 mapped_datav[0]); 1713 if (num_pages * PAGE_SIZE < 1714 BTRFS_SUPER_INFO_SIZE) { 1715 pr_info("btrfsic: cannot work with too short bios!\n"); 1716 return; 1717 } 1718 is_metadata = 1; 1719 BUG_ON(!PAGE_ALIGNED(BTRFS_SUPER_INFO_SIZE)); 1720 processed_len = BTRFS_SUPER_INFO_SIZE; 1721 if (state->print_mask & 1722 BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) { 1723 pr_info("[before new superblock is written]:\n"); 1724 btrfsic_dump_tree_sub(state, block, 0); 1725 } 1726 } 1727 if (is_metadata) { 1728 if (!block->is_superblock) { 1729 if (num_pages * PAGE_SIZE < 1730 state->metablock_size) { 1731 pr_info("btrfsic: cannot work with too short bios!\n"); 1732 return; 1733 } 1734 processed_len = state->metablock_size; 1735 bytenr = btrfs_stack_header_bytenr( 1736 (struct btrfs_header *) 1737 mapped_datav[0]); 1738 btrfsic_cmp_log_and_dev_bytenr(state, bytenr, 1739 dev_state, 1740 dev_bytenr); 1741 } 1742 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) { 1743 if (block->logical_bytenr != bytenr && 1744 !(!block->is_metadata && 1745 block->logical_bytenr == 0)) 1746 pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n", 1747 bytenr, dev_state->name, 1748 dev_bytenr, 1749 block->mirror_num, 1750 btrfsic_get_block_type(state, 1751 block), 1752 block->logical_bytenr); 1753 else 1754 pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n", 1755 bytenr, dev_state->name, 
1756 dev_bytenr, block->mirror_num, 1757 btrfsic_get_block_type(state, 1758 block)); 1759 } 1760 block->logical_bytenr = bytenr; 1761 } else { 1762 if (num_pages * PAGE_SIZE < 1763 state->datablock_size) { 1764 pr_info("btrfsic: cannot work with too short bios!\n"); 1765 return; 1766 } 1767 processed_len = state->datablock_size; 1768 bytenr = block->logical_bytenr; 1769 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1770 pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n", 1771 bytenr, dev_state->name, dev_bytenr, 1772 block->mirror_num, 1773 btrfsic_get_block_type(state, block)); 1774 } 1775 1776 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1777 pr_info("ref_to_list: %cE, ref_from_list: %cE\n", 1778 list_empty(&block->ref_to_list) ? ' ' : '!', 1779 list_empty(&block->ref_from_list) ? ' ' : '!'); 1780 if (btrfsic_is_block_ref_by_superblock(state, block, 0)) { 1781 pr_info("btrfs: attempt to overwrite %c-block @%llu (%s/%llu/%d), old(gen=%llu, objectid=%llu, type=%d, offset=%llu), new(gen=%llu), which is referenced by most recent superblock (superblockgen=%llu)!\n", 1782 btrfsic_get_block_type(state, block), bytenr, 1783 dev_state->name, dev_bytenr, block->mirror_num, 1784 block->generation, 1785 btrfs_disk_key_objectid(&block->disk_key), 1786 block->disk_key.type, 1787 btrfs_disk_key_offset(&block->disk_key), 1788 btrfs_stack_header_generation( 1789 (struct btrfs_header *) mapped_datav[0]), 1790 state->max_superblock_generation); 1791 btrfsic_dump_tree(state); 1792 } 1793 1794 if (!block->is_iodone && !block->never_written) { 1795 pr_info("btrfs: attempt to overwrite %c-block @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu, which is not yet iodone!\n", 1796 btrfsic_get_block_type(state, block), bytenr, 1797 dev_state->name, dev_bytenr, block->mirror_num, 1798 block->generation, 1799 btrfs_stack_header_generation( 1800 (struct btrfs_header *) 1801 mapped_datav[0])); 1802 /* it would not be safe to go on */ 1803 btrfsic_dump_tree(state); 1804 goto continue_loop; 1805 } 1806 1807 /* 1808 * Clear all references of this block. Do not free 1809 * the block itself even if is not referenced anymore 1810 * because it still carries valuable information 1811 * like whether it was ever written and IO completed. 
		 */
		list_for_each_entry_safe(l, tmp, &block->ref_to_list,
					 node_ref_to) {
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_rem_link(state, l);
			l->ref_cnt--;
			if (0 == l->ref_cnt) {
				list_del(&l->node_ref_to);
				list_del(&l->node_ref_from);
				btrfsic_block_link_hashtable_remove(l);
				btrfsic_block_link_free(l);
			}
		}

		block_ctx.dev = dev_state;
		block_ctx.dev_bytenr = dev_bytenr;
		block_ctx.start = bytenr;
		block_ctx.len = processed_len;
		block_ctx.pagev = NULL;
		block_ctx.mem_to_free = NULL;
		block_ctx.datav = mapped_datav;

		if (is_metadata || state->include_extent_data) {
			block->never_written = 0;
			block->iodone_w_error = 0;
			if (NULL != bio) {
				block->is_iodone = 0;
				BUG_ON(NULL == bio_is_patched);
				if (!*bio_is_patched) {
					block->orig_bio_private =
					    bio->bi_private;
					block->orig_bio_end_io =
					    bio->bi_end_io;
					block->next_in_same_bio = NULL;
					bio->bi_private = block;
					bio->bi_end_io = btrfsic_bio_end_io;
					*bio_is_patched = 1;
				} else {
					struct btrfsic_block *chained_block =
					    (struct btrfsic_block *)
					    bio->bi_private;

					BUG_ON(NULL == chained_block);
					block->orig_bio_private =
					    chained_block->orig_bio_private;
					block->orig_bio_end_io =
					    chained_block->orig_bio_end_io;
					block->next_in_same_bio = chained_block;
					bio->bi_private = block;
				}
			} else {
				block->is_iodone = 1;
				block->orig_bio_private = NULL;
				block->orig_bio_end_io = NULL;
				block->next_in_same_bio = NULL;
			}
		}

		block->flush_gen = dev_state->last_flush_gen + 1;
		block->submit_bio_bh_rw = submit_bio_bh_rw;
		if (is_metadata) {
			block->logical_bytenr = bytenr;
			block->is_metadata = 1;
			if (block->is_superblock) {
				BUG_ON(PAGE_SIZE !=
				       BTRFS_SUPER_INFO_SIZE);
				ret = btrfsic_process_written_superblock(
						state,
						block,
						(struct btrfs_super_block *)
						mapped_datav[0]);
				if (state->print_mask &
				    BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
					pr_info("[after new superblock is written]:\n");
					btrfsic_dump_tree_sub(state, block, 0);
				}
			} else {
				block->mirror_num = 0;	/* unknown */
				ret = btrfsic_process_metablock(
						state,
						block,
						&block_ctx,
						0, 0);
			}
			if (ret)
				pr_info("btrfsic: btrfsic_process_metablock(root @%llu) failed!\n",
					dev_bytenr);
		} else {
			block->is_metadata = 0;
			block->mirror_num = 0;	/* unknown */
			block->generation = BTRFSIC_GENERATION_UNKNOWN;
			if (!state->include_extent_data
			    && list_empty(&block->ref_from_list)) {
				/*
				 * disk block is overwritten with extent
				 * data (not meta data) and we are configured
				 * to not include extent data: take the
				 * chance and free the block's memory
				 */
				btrfsic_block_hashtable_remove(block);
				list_del(&block->all_blocks_node);
				btrfsic_block_free(block);
			}
		}
		btrfsic_release_block_ctx(&block_ctx);
	} else {
		/* block has not been found in hash table */
		u64 bytenr;

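		/*
		 * First-seen blocks get a fresh btrfsic_block. Data
		 * blocks are only tracked when include_extent_data
		 * (the check_int_data mount option) is set; metadata
		 * blocks are always recorded and scanned.
		 */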
		if (!is_metadata) {
			processed_len = state->datablock_size;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				pr_info("Written block (%s/%llu/?) !found in hash table, D.\n",
					dev_state->name, dev_bytenr);
			if (!state->include_extent_data) {
				/* ignore that written D block */
				goto continue_loop;
			}

			/* this is getting ugly for the
			 * include_extent_data case... */
			bytenr = 0;	/* unknown */
		} else {
			processed_len = state->metablock_size;
			bytenr = btrfs_stack_header_bytenr(
					(struct btrfs_header *)
					mapped_datav[0]);
			btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
						       dev_bytenr);
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				pr_info("Written block @%llu (%s/%llu/?) !found in hash table, M.\n",
					bytenr, dev_state->name, dev_bytenr);
		}

		block_ctx.dev = dev_state;
		block_ctx.dev_bytenr = dev_bytenr;
		block_ctx.start = bytenr;
		block_ctx.len = processed_len;
		block_ctx.pagev = NULL;
		block_ctx.mem_to_free = NULL;
		block_ctx.datav = mapped_datav;

		block = btrfsic_block_alloc();
		if (NULL == block) {
			btrfsic_release_block_ctx(&block_ctx);
			goto continue_loop;
		}
		block->dev_state = dev_state;
		block->dev_bytenr = dev_bytenr;
		block->logical_bytenr = bytenr;
		block->is_metadata = is_metadata;
		block->never_written = 0;
		block->iodone_w_error = 0;
		block->mirror_num = 0;	/* unknown */
		block->flush_gen = dev_state->last_flush_gen + 1;
		block->submit_bio_bh_rw = submit_bio_bh_rw;
		if (NULL != bio) {
			block->is_iodone = 0;
			BUG_ON(NULL == bio_is_patched);
			if (!*bio_is_patched) {
				block->orig_bio_private = bio->bi_private;
				block->orig_bio_end_io = bio->bi_end_io;
				block->next_in_same_bio = NULL;
				bio->bi_private = block;
				bio->bi_end_io = btrfsic_bio_end_io;
				*bio_is_patched = 1;
			} else {
				struct btrfsic_block *chained_block =
				    (struct btrfsic_block *)
				    bio->bi_private;

				BUG_ON(NULL == chained_block);
				block->orig_bio_private =
				    chained_block->orig_bio_private;
				block->orig_bio_end_io =
				    chained_block->orig_bio_end_io;
				block->next_in_same_bio = chained_block;
				bio->bi_private = block;
			}
		} else {
			block->is_iodone = 1;
			block->orig_bio_private = NULL;
			block->orig_bio_end_io = NULL;
			block->next_in_same_bio = NULL;
		}
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			pr_info("New written %c-block @%llu (%s/%llu/%d)\n",
				is_metadata ? 'M' : 'D',
				block->logical_bytenr, block->dev_state->name,
				block->dev_bytenr, block->mirror_num);
		list_add(&block->all_blocks_node, &state->all_blocks_list);
		btrfsic_block_hashtable_add(block, &state->block_hashtable);

		if (is_metadata) {
			ret = btrfsic_process_metablock(state, block,
							&block_ctx, 0, 0);
			if (ret)
				pr_info("btrfsic: process_metablock(root @%llu) failed!\n",
					dev_bytenr);
		}
		btrfsic_release_block_ctx(&block_ctx);
	}

continue_loop:
	BUG_ON(!processed_len);
	dev_bytenr += processed_len;
	mapped_datav += processed_len >> PAGE_SHIFT;
	num_pages -= processed_len >> PAGE_SHIFT;
	goto again;
}

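/*
 * Completion callback that is substituted for the original bi_end_io.
 * It marks every block that was attached to this bio as done, accounts
 * for flush/FUA semantics, and then chains to the original callback.
 */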
static void btrfsic_bio_end_io(struct bio *bp)
{
	struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private;
	int iodone_w_error;

	/* mutex is not held! This is not safe if IO is not yet completed
	 * on umount */
	iodone_w_error = 0;
	if (bp->bi_status)
		iodone_w_error = 1;

	BUG_ON(NULL == block);
	bp->bi_private = block->orig_bio_private;
	bp->bi_end_io = block->orig_bio_end_io;

	do {
		struct btrfsic_block *next_block;
		struct btrfsic_dev_state *const dev_state = block->dev_state;

		if ((dev_state->state->print_mask &
		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
			pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
				bp->bi_status,
				btrfsic_get_block_type(dev_state->state, block),
				block->logical_bytenr, dev_state->name,
				block->dev_bytenr, block->mirror_num);
		next_block = block->next_in_same_bio;
		block->iodone_w_error = iodone_w_error;
		if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
			dev_state->last_flush_gen++;
			if ((dev_state->state->print_mask &
			     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
				pr_info("bio_end_io() new %s flush_gen=%llu\n",
					dev_state->name,
					dev_state->last_flush_gen);
		}
		if (block->submit_bio_bh_rw & REQ_FUA)
			block->flush_gen = 0; /* FUA completed means block is
					       * on disk */
		block->is_iodone = 1; /* for FLUSH, this releases the block */
		block = next_block;
	} while (NULL != block);

	bp->bi_end_io(bp);
}

static int btrfsic_process_written_superblock(
		struct btrfsic_state *state,
		struct btrfsic_block *const superblock,
		struct btrfs_super_block *const super_hdr)
{
	struct btrfs_fs_info *fs_info = state->fs_info;
	int pass;

	superblock->generation = btrfs_super_generation(super_hdr);
	if (!(superblock->generation > state->max_superblock_generation ||
	      0 == state->max_superblock_generation)) {
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			pr_info("btrfsic: superblock @%llu (%s/%llu/%d) with old gen %llu <= %llu\n",
				superblock->logical_bytenr,
				superblock->dev_state->name,
				superblock->dev_bytenr, superblock->mirror_num,
				btrfs_super_generation(super_hdr),
				state->max_superblock_generation);
	} else {
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			pr_info("btrfsic: got new superblock @%llu (%s/%llu/%d) with new gen %llu > %llu\n",
				superblock->logical_bytenr,
				superblock->dev_state->name,
				superblock->dev_bytenr, superblock->mirror_num,
				btrfs_super_generation(super_hdr),
				state->max_superblock_generation);

		state->max_superblock_generation =
		    btrfs_super_generation(super_hdr);
		state->latest_superblock = superblock;
	}

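	/*
	 * The three passes below follow the root, chunk and log tree
	 * pointers of the just-written superblock and (re-)create a
	 * block link from the superblock to each referenced tree root,
	 * once per mirror.
	 */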
	for (pass = 0; pass < 3; pass++) {
		int ret;
		u64 next_bytenr;
		struct btrfsic_block *next_block;
		struct btrfsic_block_data_ctx tmp_next_block_ctx;
		struct btrfsic_block_link *l;
		int num_copies;
		int mirror_num;
		const char *additional_string = NULL;
		struct btrfs_disk_key tmp_disk_key = {0};

		btrfs_set_disk_key_type(&tmp_disk_key,
					BTRFS_ROOT_ITEM_KEY);
		btrfs_set_disk_key_offset(&tmp_disk_key, 0);

		switch (pass) {
		case 0:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_ROOT_TREE_OBJECTID);
			additional_string = "root ";
			next_bytenr = btrfs_super_root(super_hdr);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				pr_info("root@%llu\n", next_bytenr);
			break;
		case 1:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_CHUNK_TREE_OBJECTID);
			additional_string = "chunk ";
			next_bytenr = btrfs_super_chunk_root(super_hdr);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				pr_info("chunk@%llu\n", next_bytenr);
			break;
		case 2:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_TREE_LOG_OBJECTID);
			additional_string = "log ";
			next_bytenr = btrfs_super_log_root(super_hdr);
			if (0 == next_bytenr)
				continue;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				pr_info("log@%llu\n", next_bytenr);
			break;
		}

		num_copies = btrfs_num_copies(fs_info, next_bytenr,
					      BTRFS_SUPER_INFO_SIZE);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			pr_info("num_copies(log_bytenr=%llu) = %d\n",
				next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			int was_created;

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				pr_info("btrfsic_process_written_superblock(mirror_num=%d)\n", mirror_num);
			ret = btrfsic_map_block(state, next_bytenr,
						BTRFS_SUPER_INFO_SIZE,
						&tmp_next_block_ctx,
						mirror_num);
			if (ret) {
				pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
					next_bytenr, mirror_num);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state,
					&tmp_next_block_ctx,
					additional_string,
					1, 0, 1,
					mirror_num,
					&was_created);
			if (NULL == next_block) {
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				return -1;
			}

			next_block->disk_key = tmp_disk_key;
			if (was_created)
				next_block->generation =
				    BTRFSIC_GENERATION_UNKNOWN;
			l = btrfsic_block_link_lookup_or_add(
					state,
					&tmp_next_block_ctx,
					next_block,
					superblock,
					BTRFSIC_GENERATION_UNKNOWN);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
			if (NULL == l)
				return -1;
		}
	}

	if (WARN_ON(-1 == btrfsic_check_all_ref_blocks(state, superblock, 0)))
		btrfsic_dump_tree(state);

	return 0;
}

static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
					struct btrfsic_block *const block,
					int recursion_level)
{
	const struct btrfsic_block_link *l;
	int ret = 0;

	if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
		/*
		 * Note that this situation can happen and does not
		 * indicate an error in regular cases. It happens
		 * when disk blocks are freed and later reused.
		 * The check-integrity module is not aware of any
		 * block free operations, it just recognizes block
		 * write operations. Therefore it keeps the linkage
		 * information for a block until a block is
		 * rewritten. This can temporarily cause incorrect
		 * and even circular linkage information. This
		 * causes no harm unless such blocks are referenced
		 * by the most recent super block.
		 */
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			pr_info("btrfsic: abort cyclic linkage (case 1).\n");

		return ret;
	}

	/*
	 * This algorithm is recursive because the amount of used stack
	 * space is very small and the max recursion depth is limited.
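	 * The recursion depth is bounded to 3 + BTRFS_MAX_LEVEL frames
	 * by the cyclic linkage check above.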
	 */
	list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			pr_info("rl=%d, %c @%llu (%s/%llu/%d) %u* refers to %c @%llu (%s/%llu/%d)\n",
				recursion_level,
				btrfsic_get_block_type(state, block),
				block->logical_bytenr, block->dev_state->name,
				block->dev_bytenr, block->mirror_num,
				l->ref_cnt,
				btrfsic_get_block_type(state, l->block_ref_to),
				l->block_ref_to->logical_bytenr,
				l->block_ref_to->dev_state->name,
				l->block_ref_to->dev_bytenr,
				l->block_ref_to->mirror_num);
		if (l->block_ref_to->never_written) {
			pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is never written!\n",
				btrfsic_get_block_type(state, l->block_ref_to),
				l->block_ref_to->logical_bytenr,
				l->block_ref_to->dev_state->name,
				l->block_ref_to->dev_bytenr,
				l->block_ref_to->mirror_num);
			ret = -1;
		} else if (!l->block_ref_to->is_iodone) {
			pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is not yet iodone!\n",
				btrfsic_get_block_type(state, l->block_ref_to),
				l->block_ref_to->logical_bytenr,
				l->block_ref_to->dev_state->name,
				l->block_ref_to->dev_bytenr,
				l->block_ref_to->mirror_num);
			ret = -1;
		} else if (l->block_ref_to->iodone_w_error) {
			pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which has write error!\n",
				btrfsic_get_block_type(state, l->block_ref_to),
				l->block_ref_to->logical_bytenr,
				l->block_ref_to->dev_state->name,
				l->block_ref_to->dev_bytenr,
				l->block_ref_to->mirror_num);
			ret = -1;
		} else if (l->parent_generation !=
			   l->block_ref_to->generation &&
			   BTRFSIC_GENERATION_UNKNOWN !=
			   l->parent_generation &&
			   BTRFSIC_GENERATION_UNKNOWN !=
			   l->block_ref_to->generation) {
			pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) with generation %llu != parent generation %llu!\n",
				btrfsic_get_block_type(state, l->block_ref_to),
				l->block_ref_to->logical_bytenr,
				l->block_ref_to->dev_state->name,
				l->block_ref_to->dev_bytenr,
				l->block_ref_to->mirror_num,
				l->block_ref_to->generation,
				l->parent_generation);
			ret = -1;
		} else if (l->block_ref_to->flush_gen >
			   l->block_ref_to->dev_state->last_flush_gen) {
			pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is not flushed out of disk's write cache (block flush_gen=%llu, dev->flush_gen=%llu)!\n",
				btrfsic_get_block_type(state, l->block_ref_to),
				l->block_ref_to->logical_bytenr,
				l->block_ref_to->dev_state->name,
				l->block_ref_to->dev_bytenr,
				l->block_ref_to->mirror_num,
				l->block_ref_to->flush_gen,
				l->block_ref_to->dev_state->last_flush_gen);
			ret = -1;
		} else if (-1 == btrfsic_check_all_ref_blocks(state,
							      l->block_ref_to,
							      recursion_level +
							      1)) {
			ret = -1;
		}
	}

	return ret;
}

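/*
 * Walk the ref_from_list edges upwards to decide whether a block is
 * (directly or indirectly) referenced by the most recently written
 * superblock.
 */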
static int btrfsic_is_block_ref_by_superblock(
		const struct btrfsic_state *state,
		const struct btrfsic_block *block,
		int recursion_level)
{
	const struct btrfsic_block_link *l;

	if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
		/* refer to comment at "abort cyclic linkage (case 1)" */
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			pr_info("btrfsic: abort cyclic linkage (case 2).\n");

		return 0;
	}

	/*
	 * This algorithm is recursive because the amount of used stack space
	 * is very small and the max recursion depth is limited.
	 */
	list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			pr_info("rl=%d, %c @%llu (%s/%llu/%d) is ref %u* from %c @%llu (%s/%llu/%d)\n",
				recursion_level,
				btrfsic_get_block_type(state, block),
				block->logical_bytenr, block->dev_state->name,
				block->dev_bytenr, block->mirror_num,
				l->ref_cnt,
				btrfsic_get_block_type(state, l->block_ref_from),
				l->block_ref_from->logical_bytenr,
				l->block_ref_from->dev_state->name,
				l->block_ref_from->dev_bytenr,
				l->block_ref_from->mirror_num);
		if (l->block_ref_from->is_superblock &&
		    state->latest_superblock->dev_bytenr ==
		    l->block_ref_from->dev_bytenr &&
		    state->latest_superblock->dev_state->bdev ==
		    l->block_ref_from->dev_state->bdev)
			return 1;
		else if (btrfsic_is_block_ref_by_superblock(state,
							    l->block_ref_from,
							    recursion_level +
							    1))
			return 1;
	}

	return 0;
}

static void btrfsic_print_add_link(const struct btrfsic_state *state,
				   const struct btrfsic_block_link *l)
{
	pr_info("Add %u* link from %c @%llu (%s/%llu/%d) to %c @%llu (%s/%llu/%d).\n",
		l->ref_cnt,
		btrfsic_get_block_type(state, l->block_ref_from),
		l->block_ref_from->logical_bytenr,
		l->block_ref_from->dev_state->name,
		l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num,
		btrfsic_get_block_type(state, l->block_ref_to),
		l->block_ref_to->logical_bytenr,
		l->block_ref_to->dev_state->name, l->block_ref_to->dev_bytenr,
		l->block_ref_to->mirror_num);
}

static void btrfsic_print_rem_link(const struct btrfsic_state *state,
				   const struct btrfsic_block_link *l)
{
	pr_info("Rem %u* link from %c @%llu (%s/%llu/%d) to %c @%llu (%s/%llu/%d).\n",
		l->ref_cnt,
		btrfsic_get_block_type(state, l->block_ref_from),
		l->block_ref_from->logical_bytenr,
		l->block_ref_from->dev_state->name,
		l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num,
		btrfsic_get_block_type(state, l->block_ref_to),
		l->block_ref_to->logical_bytenr,
		l->block_ref_to->dev_state->name, l->block_ref_to->dev_bytenr,
		l->block_ref_to->mirror_num);
}

static char btrfsic_get_block_type(const struct btrfsic_state *state,
				   const struct btrfsic_block *block)
{
	if (block->is_superblock &&
	    state->latest_superblock->dev_bytenr == block->dev_bytenr &&
	    state->latest_superblock->dev_state->bdev == block->dev_state->bdev)
		return 'S';
	else if (block->is_superblock)
		return 's';
	else if (block->is_metadata)
		return 'M';
	else
		return 'D';
}

static void btrfsic_dump_tree(const struct btrfsic_state *state)
{
	btrfsic_dump_tree_sub(state, state->latest_superblock, 0);
}

static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
				  const struct btrfsic_block *block,
				  int indent_level)
{
	const struct btrfsic_block_link *l;
	int indent_add;
	static char buf[80];
	int cursor_position;

	/*
	 * It would be better to fill an on-stack buffer with a complete
	 * line and dump it at once when it is time to print a newline
	 * character.
	 */

	/*
	 * This algorithm is recursive because the amount of used stack space
	 * is very small and the max recursion depth is limited.
	 */
	indent_add = sprintf(buf, "%c-%llu(%s/%llu/%u)",
			     btrfsic_get_block_type(state, block),
			     block->logical_bytenr, block->dev_state->name,
			     block->dev_bytenr, block->mirror_num);
	if (indent_level + indent_add > BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
		printk("[...]\n");
		return;
	}
	printk("%s", buf);
	indent_level += indent_add;
	if (list_empty(&block->ref_to_list)) {
		printk("\n");
		return;
	}
	if (block->mirror_num > 1 &&
	    !(state->print_mask & BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS)) {
		printk(" [...]\n");
		return;
	}

	cursor_position = indent_level;
	list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
		while (cursor_position < indent_level) {
			printk(" ");
			cursor_position++;
		}
		if (l->ref_cnt > 1)
			indent_add = sprintf(buf, " %d*--> ", l->ref_cnt);
		else
			indent_add = sprintf(buf, " --> ");
		if (indent_level + indent_add >
		    BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
			printk("[...]\n");
			cursor_position = 0;
			continue;
		}

		printk("%s", buf);

		btrfsic_dump_tree_sub(state, l->block_ref_to,
				      indent_level + indent_add);
		cursor_position = 0;
	}
}

static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
		struct btrfsic_state *state,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block *next_block,
		struct btrfsic_block *from_block,
		u64 parent_generation)
{
	struct btrfsic_block_link *l;

	l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev,
						next_block_ctx->dev_bytenr,
						from_block->dev_state->bdev,
						from_block->dev_bytenr,
						&state->block_link_hashtable);
	if (NULL == l) {
		l = btrfsic_block_link_alloc();
		if (!l)
			return NULL;

		l->block_ref_to = next_block;
		l->block_ref_from = from_block;
		l->ref_cnt = 1;
		l->parent_generation = parent_generation;

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			btrfsic_print_add_link(state, l);

		list_add(&l->node_ref_to, &from_block->ref_to_list);
		list_add(&l->node_ref_from, &next_block->ref_from_list);

		btrfsic_block_link_hashtable_add(l,
						 &state->block_link_hashtable);
	} else {
		l->ref_cnt++;
		l->parent_generation = parent_generation;
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			btrfsic_print_add_link(state, l);
	}

	return l;
}

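/*
 * Look a block up by (bdev, physical bytenr); when it is not yet known,
 * allocate it, initialize it from the mapping context and the caller's
 * flags, and add it to the hash table and the global block list.
 */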
static struct btrfsic_block *btrfsic_block_lookup_or_add(
		struct btrfsic_state *state,
		struct btrfsic_block_data_ctx *block_ctx,
		const char *additional_string,
		int is_metadata,
		int is_iodone,
		int never_written,
		int mirror_num,
		int *was_created)
{
	struct btrfsic_block *block;

	block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev,
					       block_ctx->dev_bytenr,
					       &state->block_hashtable);
	if (NULL == block) {
		struct btrfsic_dev_state *dev_state;

		block = btrfsic_block_alloc();
		if (!block)
			return NULL;

		dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev->bd_dev);
		if (NULL == dev_state) {
			pr_info("btrfsic: error, lookup dev_state failed!\n");
			btrfsic_block_free(block);
			return NULL;
		}
		block->dev_state = dev_state;
		block->dev_bytenr = block_ctx->dev_bytenr;
		block->logical_bytenr = block_ctx->start;
		block->is_metadata = is_metadata;
		block->is_iodone = is_iodone;
		block->never_written = never_written;
		block->mirror_num = mirror_num;
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			pr_info("New %s%c-block @%llu (%s/%llu/%d)\n",
				additional_string,
				btrfsic_get_block_type(state, block),
				block->logical_bytenr, dev_state->name,
				block->dev_bytenr, mirror_num);
		list_add(&block->all_blocks_node, &state->all_blocks_list);
		btrfsic_block_hashtable_add(block, &state->block_hashtable);
		if (NULL != was_created)
			*was_created = 1;
	} else {
		if (NULL != was_created)
			*was_created = 0;
	}

	return block;
}

static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
					   u64 bytenr,
					   struct btrfsic_dev_state *dev_state,
					   u64 dev_bytenr)
{
	struct btrfs_fs_info *fs_info = state->fs_info;
	struct btrfsic_block_data_ctx block_ctx;
	int num_copies;
	int mirror_num;
	int match = 0;
	int ret;

	num_copies = btrfs_num_copies(fs_info, bytenr, state->metablock_size);

	for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
		ret = btrfsic_map_block(state, bytenr, state->metablock_size,
					&block_ctx, mirror_num);
		if (ret) {
			pr_info("btrfsic: btrfsic_map_block(logical @%llu, mirror %d) failed!\n",
				bytenr, mirror_num);
			continue;
		}

		if (dev_state->bdev == block_ctx.dev->bdev &&
		    dev_bytenr == block_ctx.dev_bytenr) {
			match++;
			btrfsic_release_block_ctx(&block_ctx);
			break;
		}
		btrfsic_release_block_ctx(&block_ctx);
	}

	if (WARN_ON(!match)) {
		pr_info("btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio, buffer->log_bytenr=%llu, submit_bio(bdev=%s, phys_bytenr=%llu)!\n",
			bytenr, dev_state->name, dev_bytenr);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			ret = btrfsic_map_block(state, bytenr,
						state->metablock_size,
						&block_ctx, mirror_num);
			if (ret)
				continue;

			pr_info("Read logical bytenr @%llu maps to (%s/%llu/%d)\n",
				bytenr, block_ctx.dev->name,
				block_ctx.dev_bytenr, mirror_num);
		}
	}
}

static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t dev)
{
	return btrfsic_dev_state_hashtable_lookup(dev,
						  &btrfsic_dev_state_hashtable);
}

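/*
 * Intercept a bio before it is submitted: writes with a data payload
 * are fed to btrfsic_process_written_block() page by page, and empty
 * REQ_PREFLUSH bios are tracked via the per-device dummy block so that
 * flush completions can advance last_flush_gen. Callers use the
 * wrappers below in place of the block layer calls, e.g.
 * btrfsic_submit_bio(bio) instead of submit_bio(bio).
 */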
static void __btrfsic_submit_bio(struct bio *bio)
{
	struct btrfsic_dev_state *dev_state;

	if (!btrfsic_is_initialized)
		return;

	mutex_lock(&btrfsic_mutex);
	/* since btrfsic_submit_bio() is also called before
	 * btrfsic_mount(), this might return NULL */
	dev_state = btrfsic_dev_state_lookup(bio->bi_bdev->bd_dev);
	if (NULL != dev_state &&
	    (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
		int i = 0;
		u64 dev_bytenr;
		u64 cur_bytenr;
		struct bio_vec bvec;
		struct bvec_iter iter;
		int bio_is_patched;
		char **mapped_datav;
		unsigned int segs = bio_segments(bio);

		dev_bytenr = 512 * bio->bi_iter.bi_sector;
		bio_is_patched = 0;
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
				bio_op(bio), bio->bi_opf, segs,
				bio->bi_iter.bi_sector, dev_bytenr, bio->bi_bdev);

		mapped_datav = kmalloc_array(segs,
					     sizeof(*mapped_datav), GFP_NOFS);
		if (!mapped_datav)
			goto leave;
		cur_bytenr = dev_bytenr;

		bio_for_each_segment(bvec, bio, iter) {
			BUG_ON(bvec.bv_len != PAGE_SIZE);
			mapped_datav[i] = page_address(bvec.bv_page);
			i++;

			if (dev_state->state->print_mask &
			    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
				pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
					i, cur_bytenr, bvec.bv_len, bvec.bv_offset);
			cur_bytenr += bvec.bv_len;
		}
		btrfsic_process_written_block(dev_state, dev_bytenr,
					      mapped_datav, segs,
					      bio, &bio_is_patched,
					      bio->bi_opf);
		kfree(mapped_datav);
	} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
				bio_op(bio), bio->bi_opf, bio->bi_bdev);
		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
			if ((dev_state->state->print_mask &
			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			      BTRFSIC_PRINT_MASK_VERBOSE)))
				pr_info("btrfsic_submit_bio(%s) with FLUSH but dummy block already in use (ignored)!\n",
					dev_state->name);
		} else {
			struct btrfsic_block *const block =
				&dev_state->dummy_block_for_bio_bh_flush;

			block->is_iodone = 0;
			block->never_written = 0;
			block->iodone_w_error = 0;
			block->flush_gen = dev_state->last_flush_gen + 1;
			block->submit_bio_bh_rw = bio->bi_opf;
			block->orig_bio_private = bio->bi_private;
			block->orig_bio_end_io = bio->bi_end_io;
			block->next_in_same_bio = NULL;
			bio->bi_private = block;
			bio->bi_end_io = btrfsic_bio_end_io;
		}
	}
leave:
	mutex_unlock(&btrfsic_mutex);
}

void btrfsic_submit_bio(struct bio *bio)
{
	__btrfsic_submit_bio(bio);
	submit_bio(bio);
}

int btrfsic_submit_bio_wait(struct bio *bio)
{
	__btrfsic_submit_bio(bio);
	return submit_bio_wait(bio);
}

int btrfsic_mount(struct btrfs_fs_info *fs_info,
		  struct btrfs_fs_devices *fs_devices,
		  int including_extent_data, u32 print_mask)
{
	int ret;
	struct btrfsic_state *state;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;

	if (!PAGE_ALIGNED(fs_info->nodesize)) {
		pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
			fs_info->nodesize, PAGE_SIZE);
		return -1;
	}
	if (!PAGE_ALIGNED(fs_info->sectorsize)) {
		pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
			fs_info->sectorsize, PAGE_SIZE);
		return -1;
	}
	state = kvzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	if (!btrfsic_is_initialized) {
		mutex_init(&btrfsic_mutex);
		btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable);
		btrfsic_is_initialized = 1;
	}
	mutex_lock(&btrfsic_mutex);
	state->fs_info = fs_info;
	state->print_mask = print_mask;
	state->include_extent_data = including_extent_data;
	state->metablock_size = fs_info->nodesize;
	state->datablock_size = fs_info->sectorsize;
	INIT_LIST_HEAD(&state->all_blocks_list);
	btrfsic_block_hashtable_init(&state->block_hashtable);
	btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
	state->max_superblock_generation = 0;
	state->latest_superblock = NULL;

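	/*
	 * Register a dev_state for every device that has a block device
	 * attached; it carries the per-device flush generation and the
	 * dummy block used to track empty flush bios.
	 */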
	list_for_each_entry(device, dev_head, dev_list) {
		struct btrfsic_dev_state *ds;
		const char *p;

		if (!device->bdev || !device->name)
			continue;

		ds = btrfsic_dev_state_alloc();
		if (NULL == ds) {
			mutex_unlock(&btrfsic_mutex);
			return -ENOMEM;
		}
		ds->bdev = device->bdev;
		ds->state = state;
		bdevname(ds->bdev, ds->name);
		ds->name[BDEVNAME_SIZE - 1] = '\0';
		p = kbasename(ds->name);
		strlcpy(ds->name, p, sizeof(ds->name));
		btrfsic_dev_state_hashtable_add(ds,
						&btrfsic_dev_state_hashtable);
	}

	ret = btrfsic_process_superblock(state, fs_devices);
	if (0 != ret) {
		mutex_unlock(&btrfsic_mutex);
		btrfsic_unmount(fs_devices);
		return ret;
	}

	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_DATABASE)
		btrfsic_dump_database(state);
	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_TREE)
		btrfsic_dump_tree(state);

	mutex_unlock(&btrfsic_mutex);
	return 0;
}

void btrfsic_unmount(struct btrfs_fs_devices *fs_devices)
{
	struct btrfsic_block *b_all, *tmp_all;
	struct btrfsic_state *state;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;

	if (!btrfsic_is_initialized)
		return;

	mutex_lock(&btrfsic_mutex);

	state = NULL;
	list_for_each_entry(device, dev_head, dev_list) {
		struct btrfsic_dev_state *ds;

		if (!device->bdev || !device->name)
			continue;

		ds = btrfsic_dev_state_hashtable_lookup(
				device->bdev->bd_dev,
				&btrfsic_dev_state_hashtable);
		if (NULL != ds) {
			state = ds->state;
			btrfsic_dev_state_hashtable_remove(ds);
			btrfsic_dev_state_free(ds);
		}
	}

	if (NULL == state) {
		pr_info("btrfsic: error, cannot find state information on umount!\n");
		mutex_unlock(&btrfsic_mutex);
		return;
	}

	/*
	 * Don't care about keeping the lists' state up to date,
	 * just free all memory that was allocated dynamically.
	 * Free the blocks and the block_links.
	 */
	list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list,
				 all_blocks_node) {
		struct btrfsic_block_link *l, *tmp;

		list_for_each_entry_safe(l, tmp, &b_all->ref_to_list,
					 node_ref_to) {
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_rem_link(state, l);

			l->ref_cnt--;
			if (0 == l->ref_cnt)
				btrfsic_block_link_free(l);
		}

		if (b_all->is_iodone || b_all->never_written)
			btrfsic_block_free(b_all);
		else
			pr_info("btrfs: attempt to free %c-block @%llu (%s/%llu/%d) on umount which is not yet iodone!\n",
				btrfsic_get_block_type(state, b_all),
				b_all->logical_bytenr, b_all->dev_state->name,
				b_all->dev_bytenr, b_all->mirror_num);
	}

	mutex_unlock(&btrfsic_mutex);

	kvfree(state);
}