1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) STRATO AG 2011. All rights reserved. 4 */ 5 6 /* 7 * This module can be used to catch cases when the btrfs kernel 8 * code executes write requests to the disk that bring the file 9 * system in an inconsistent state. In such a state, a power-loss 10 * or kernel panic event would cause that the data on disk is 11 * lost or at least damaged. 12 * 13 * Code is added that examines all block write requests during 14 * runtime (including writes of the super block). Three rules 15 * are verified and an error is printed on violation of the 16 * rules: 17 * 1. It is not allowed to write a disk block which is 18 * currently referenced by the super block (either directly 19 * or indirectly). 20 * 2. When a super block is written, it is verified that all 21 * referenced (directly or indirectly) blocks fulfill the 22 * following requirements: 23 * 2a. All referenced blocks have either been present when 24 * the file system was mounted, (i.e., they have been 25 * referenced by the super block) or they have been 26 * written since then and the write completion callback 27 * was called and no write error was indicated and a 28 * FLUSH request to the device where these blocks are 29 * located was received and completed. 30 * 2b. All referenced blocks need to have a generation 31 * number which is equal to the parent's number. 32 * 33 * One issue that was found using this module was that the log 34 * tree on disk became temporarily corrupted because disk blocks 35 * that had been in use for the log tree had been freed and 36 * reused too early, while being referenced by the written super 37 * block. 38 * 39 * The search term in the kernel log that can be used to filter 40 * on the existence of detected integrity issues is 41 * "btrfs: attempt". 42 * 43 * The integrity check is enabled via mount options. 
These 44 * mount options are only supported if the integrity check 45 * tool is compiled by defining BTRFS_FS_CHECK_INTEGRITY. 46 * 47 * Example #1, apply integrity checks to all metadata: 48 * mount /dev/sdb1 /mnt -o check_int 49 * 50 * Example #2, apply integrity checks to all metadata and 51 * to data extents: 52 * mount /dev/sdb1 /mnt -o check_int_data 53 * 54 * Example #3, apply integrity checks to all metadata and dump 55 * the tree that the super block references to kernel messages 56 * each time after a super block was written: 57 * mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263 58 * 59 * If the integrity check tool is included and activated in 60 * the mount options, plenty of kernel memory is used, and 61 * plenty of additional CPU cycles are spent. Enabling this 62 * functionality is not intended for normal use. In most 63 * cases, unless you are a btrfs developer who needs to verify 64 * the integrity of (super)-block write requests, do not 65 * enable the config option BTRFS_FS_CHECK_INTEGRITY to 66 * include and compile the integrity check tool. 67 * 68 * Expect millions of lines of information in the kernel log with an 69 * enabled check_int_print_mask. Therefore set LOG_BUF_SHIFT in the 70 * kernel config to at least 26 (which is 64MB). Usually the value is 71 * limited to 21 (which is 2MB) in init/Kconfig. 
The file needs to be 72 * changed like this before LOG_BUF_SHIFT can be set to a high value: 73 * config LOG_BUF_SHIFT 74 * int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" 75 * range 12 30 76 */ 77 78 #include <linux/sched.h> 79 #include <linux/slab.h> 80 #include <linux/mutex.h> 81 #include <linux/genhd.h> 82 #include <linux/blkdev.h> 83 #include <linux/mm.h> 84 #include <linux/string.h> 85 #include <crypto/hash.h> 86 #include "ctree.h" 87 #include "disk-io.h" 88 #include "transaction.h" 89 #include "extent_io.h" 90 #include "volumes.h" 91 #include "print-tree.h" 92 #include "locking.h" 93 #include "check-integrity.h" 94 #include "rcu-string.h" 95 #include "compression.h" 96 97 #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 98 #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 99 #define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100 100 #define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051 101 #define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807 102 #define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530 103 #define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300 104 #define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6) /* in characters, 105 * excluding " [...]" */ 106 #define BTRFSIC_GENERATION_UNKNOWN ((u64)-1) 107 108 /* 109 * The definition of the bitmask fields for the print_mask. 110 * They are specified with the mount option check_integrity_print_mask. 
111 */ 112 #define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE 0x00000001 113 #define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION 0x00000002 114 #define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE 0x00000004 115 #define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE 0x00000008 116 #define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH 0x00000010 117 #define BTRFSIC_PRINT_MASK_END_IO_BIO_BH 0x00000020 118 #define BTRFSIC_PRINT_MASK_VERBOSE 0x00000040 119 #define BTRFSIC_PRINT_MASK_VERY_VERBOSE 0x00000080 120 #define BTRFSIC_PRINT_MASK_INITIAL_TREE 0x00000100 121 #define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES 0x00000200 122 #define BTRFSIC_PRINT_MASK_INITIAL_DATABASE 0x00000400 123 #define BTRFSIC_PRINT_MASK_NUM_COPIES 0x00000800 124 #define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS 0x00001000 125 #define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE 0x00002000 126 127 struct btrfsic_dev_state; 128 struct btrfsic_state; 129 130 struct btrfsic_block { 131 u32 magic_num; /* only used for debug purposes */ 132 unsigned int is_metadata:1; /* if it is meta-data, not data-data */ 133 unsigned int is_superblock:1; /* if it is one of the superblocks */ 134 unsigned int is_iodone:1; /* if is done by lower subsystem */ 135 unsigned int iodone_w_error:1; /* error was indicated to endio */ 136 unsigned int never_written:1; /* block was added because it was 137 * referenced, not because it was 138 * written */ 139 unsigned int mirror_num; /* large enough to hold 140 * BTRFS_SUPER_MIRROR_MAX */ 141 struct btrfsic_dev_state *dev_state; 142 u64 dev_bytenr; /* key, physical byte num on disk */ 143 u64 logical_bytenr; /* logical byte num on disk */ 144 u64 generation; 145 struct btrfs_disk_key disk_key; /* extra info to print in case of 146 * issues, will not always be correct */ 147 struct list_head collision_resolving_node; /* list node */ 148 struct list_head all_blocks_node; /* list node */ 149 150 /* the following two lists contain block_link items */ 151 struct list_head ref_to_list; /* list */ 152 struct list_head 
ref_from_list; /* list */ 153 struct btrfsic_block *next_in_same_bio; 154 void *orig_bio_private; 155 bio_end_io_t *orig_bio_end_io; 156 int submit_bio_bh_rw; 157 u64 flush_gen; /* only valid if !never_written */ 158 }; 159 160 /* 161 * Elements of this type are allocated dynamically and required because 162 * each block object can refer to and can be ref from multiple blocks. 163 * The key to lookup them in the hashtable is the dev_bytenr of 164 * the block ref to plus the one from the block referred from. 165 * The fact that they are searchable via a hashtable and that a 166 * ref_cnt is maintained is not required for the btrfs integrity 167 * check algorithm itself, it is only used to make the output more 168 * beautiful in case that an error is detected (an error is defined 169 * as a write operation to a block while that block is still referenced). 170 */ 171 struct btrfsic_block_link { 172 u32 magic_num; /* only used for debug purposes */ 173 u32 ref_cnt; 174 struct list_head node_ref_to; /* list node */ 175 struct list_head node_ref_from; /* list node */ 176 struct list_head collision_resolving_node; /* list node */ 177 struct btrfsic_block *block_ref_to; 178 struct btrfsic_block *block_ref_from; 179 u64 parent_generation; 180 }; 181 182 struct btrfsic_dev_state { 183 u32 magic_num; /* only used for debug purposes */ 184 struct block_device *bdev; 185 struct btrfsic_state *state; 186 struct list_head collision_resolving_node; /* list node */ 187 struct btrfsic_block dummy_block_for_bio_bh_flush; 188 u64 last_flush_gen; 189 char name[BDEVNAME_SIZE]; 190 }; 191 192 struct btrfsic_block_hashtable { 193 struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE]; 194 }; 195 196 struct btrfsic_block_link_hashtable { 197 struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE]; 198 }; 199 200 struct btrfsic_dev_state_hashtable { 201 struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE]; 202 }; 203 204 struct btrfsic_block_data_ctx { 205 u64 start; /* virtual bytenr 
*/ 206 u64 dev_bytenr; /* physical bytenr on device */ 207 u32 len; 208 struct btrfsic_dev_state *dev; 209 char **datav; 210 struct page **pagev; 211 void *mem_to_free; 212 }; 213 214 /* This structure is used to implement recursion without occupying 215 * any stack space, refer to btrfsic_process_metablock() */ 216 struct btrfsic_stack_frame { 217 u32 magic; 218 u32 nr; 219 int error; 220 int i; 221 int limit_nesting; 222 int num_copies; 223 int mirror_num; 224 struct btrfsic_block *block; 225 struct btrfsic_block_data_ctx *block_ctx; 226 struct btrfsic_block *next_block; 227 struct btrfsic_block_data_ctx next_block_ctx; 228 struct btrfs_header *hdr; 229 struct btrfsic_stack_frame *prev; 230 }; 231 232 /* Some state per mounted filesystem */ 233 struct btrfsic_state { 234 u32 print_mask; 235 int include_extent_data; 236 int csum_size; 237 struct list_head all_blocks_list; 238 struct btrfsic_block_hashtable block_hashtable; 239 struct btrfsic_block_link_hashtable block_link_hashtable; 240 struct btrfs_fs_info *fs_info; 241 u64 max_superblock_generation; 242 struct btrfsic_block *latest_superblock; 243 u32 metablock_size; 244 u32 datablock_size; 245 }; 246 247 static void btrfsic_block_init(struct btrfsic_block *b); 248 static struct btrfsic_block *btrfsic_block_alloc(void); 249 static void btrfsic_block_free(struct btrfsic_block *b); 250 static void btrfsic_block_link_init(struct btrfsic_block_link *n); 251 static struct btrfsic_block_link *btrfsic_block_link_alloc(void); 252 static void btrfsic_block_link_free(struct btrfsic_block_link *n); 253 static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds); 254 static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void); 255 static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds); 256 static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h); 257 static void btrfsic_block_hashtable_add(struct btrfsic_block *b, 258 struct btrfsic_block_hashtable *h); 259 static void 
btrfsic_block_hashtable_remove(struct btrfsic_block *b); 260 static struct btrfsic_block *btrfsic_block_hashtable_lookup( 261 struct block_device *bdev, 262 u64 dev_bytenr, 263 struct btrfsic_block_hashtable *h); 264 static void btrfsic_block_link_hashtable_init( 265 struct btrfsic_block_link_hashtable *h); 266 static void btrfsic_block_link_hashtable_add( 267 struct btrfsic_block_link *l, 268 struct btrfsic_block_link_hashtable *h); 269 static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l); 270 static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup( 271 struct block_device *bdev_ref_to, 272 u64 dev_bytenr_ref_to, 273 struct block_device *bdev_ref_from, 274 u64 dev_bytenr_ref_from, 275 struct btrfsic_block_link_hashtable *h); 276 static void btrfsic_dev_state_hashtable_init( 277 struct btrfsic_dev_state_hashtable *h); 278 static void btrfsic_dev_state_hashtable_add( 279 struct btrfsic_dev_state *ds, 280 struct btrfsic_dev_state_hashtable *h); 281 static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds); 282 static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev, 283 struct btrfsic_dev_state_hashtable *h); 284 static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void); 285 static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf); 286 static int btrfsic_process_superblock(struct btrfsic_state *state, 287 struct btrfs_fs_devices *fs_devices); 288 static int btrfsic_process_metablock(struct btrfsic_state *state, 289 struct btrfsic_block *block, 290 struct btrfsic_block_data_ctx *block_ctx, 291 int limit_nesting, int force_iodone_flag); 292 static void btrfsic_read_from_block_data( 293 struct btrfsic_block_data_ctx *block_ctx, 294 void *dst, u32 offset, size_t len); 295 static int btrfsic_create_link_to_next_block( 296 struct btrfsic_state *state, 297 struct btrfsic_block *block, 298 struct btrfsic_block_data_ctx 299 *block_ctx, u64 next_bytenr, 300 int limit_nesting, 
301 struct btrfsic_block_data_ctx *next_block_ctx, 302 struct btrfsic_block **next_blockp, 303 int force_iodone_flag, 304 int *num_copiesp, int *mirror_nump, 305 struct btrfs_disk_key *disk_key, 306 u64 parent_generation); 307 static int btrfsic_handle_extent_data(struct btrfsic_state *state, 308 struct btrfsic_block *block, 309 struct btrfsic_block_data_ctx *block_ctx, 310 u32 item_offset, int force_iodone_flag); 311 static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len, 312 struct btrfsic_block_data_ctx *block_ctx_out, 313 int mirror_num); 314 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx); 315 static int btrfsic_read_block(struct btrfsic_state *state, 316 struct btrfsic_block_data_ctx *block_ctx); 317 static void btrfsic_dump_database(struct btrfsic_state *state); 318 static int btrfsic_test_for_metadata(struct btrfsic_state *state, 319 char **datav, unsigned int num_pages); 320 static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, 321 u64 dev_bytenr, char **mapped_datav, 322 unsigned int num_pages, 323 struct bio *bio, int *bio_is_patched, 324 int submit_bio_bh_rw); 325 static int btrfsic_process_written_superblock( 326 struct btrfsic_state *state, 327 struct btrfsic_block *const block, 328 struct btrfs_super_block *const super_hdr); 329 static void btrfsic_bio_end_io(struct bio *bp); 330 static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state, 331 const struct btrfsic_block *block, 332 int recursion_level); 333 static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state, 334 struct btrfsic_block *const block, 335 int recursion_level); 336 static void btrfsic_print_add_link(const struct btrfsic_state *state, 337 const struct btrfsic_block_link *l); 338 static void btrfsic_print_rem_link(const struct btrfsic_state *state, 339 const struct btrfsic_block_link *l); 340 static char btrfsic_get_block_type(const struct btrfsic_state *state, 341 const 
struct btrfsic_block *block); 342 static void btrfsic_dump_tree(const struct btrfsic_state *state); 343 static void btrfsic_dump_tree_sub(const struct btrfsic_state *state, 344 const struct btrfsic_block *block, 345 int indent_level); 346 static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add( 347 struct btrfsic_state *state, 348 struct btrfsic_block_data_ctx *next_block_ctx, 349 struct btrfsic_block *next_block, 350 struct btrfsic_block *from_block, 351 u64 parent_generation); 352 static struct btrfsic_block *btrfsic_block_lookup_or_add( 353 struct btrfsic_state *state, 354 struct btrfsic_block_data_ctx *block_ctx, 355 const char *additional_string, 356 int is_metadata, 357 int is_iodone, 358 int never_written, 359 int mirror_num, 360 int *was_created); 361 static int btrfsic_process_superblock_dev_mirror( 362 struct btrfsic_state *state, 363 struct btrfsic_dev_state *dev_state, 364 struct btrfs_device *device, 365 int superblock_mirror_num, 366 struct btrfsic_dev_state **selected_dev_state, 367 struct btrfs_super_block *selected_super); 368 static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t dev); 369 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state, 370 u64 bytenr, 371 struct btrfsic_dev_state *dev_state, 372 u64 dev_bytenr); 373 374 static struct mutex btrfsic_mutex; 375 static int btrfsic_is_initialized; 376 static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable; 377 378 379 static void btrfsic_block_init(struct btrfsic_block *b) 380 { 381 b->magic_num = BTRFSIC_BLOCK_MAGIC_NUMBER; 382 b->dev_state = NULL; 383 b->dev_bytenr = 0; 384 b->logical_bytenr = 0; 385 b->generation = BTRFSIC_GENERATION_UNKNOWN; 386 b->disk_key.objectid = 0; 387 b->disk_key.type = 0; 388 b->disk_key.offset = 0; 389 b->is_metadata = 0; 390 b->is_superblock = 0; 391 b->is_iodone = 0; 392 b->iodone_w_error = 0; 393 b->never_written = 0; 394 b->mirror_num = 0; 395 b->next_in_same_bio = NULL; 396 b->orig_bio_private = NULL; 397 
b->orig_bio_end_io = NULL; 398 INIT_LIST_HEAD(&b->collision_resolving_node); 399 INIT_LIST_HEAD(&b->all_blocks_node); 400 INIT_LIST_HEAD(&b->ref_to_list); 401 INIT_LIST_HEAD(&b->ref_from_list); 402 b->submit_bio_bh_rw = 0; 403 b->flush_gen = 0; 404 } 405 406 static struct btrfsic_block *btrfsic_block_alloc(void) 407 { 408 struct btrfsic_block *b; 409 410 b = kzalloc(sizeof(*b), GFP_NOFS); 411 if (NULL != b) 412 btrfsic_block_init(b); 413 414 return b; 415 } 416 417 static void btrfsic_block_free(struct btrfsic_block *b) 418 { 419 BUG_ON(!(NULL == b || BTRFSIC_BLOCK_MAGIC_NUMBER == b->magic_num)); 420 kfree(b); 421 } 422 423 static void btrfsic_block_link_init(struct btrfsic_block_link *l) 424 { 425 l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER; 426 l->ref_cnt = 1; 427 INIT_LIST_HEAD(&l->node_ref_to); 428 INIT_LIST_HEAD(&l->node_ref_from); 429 INIT_LIST_HEAD(&l->collision_resolving_node); 430 l->block_ref_to = NULL; 431 l->block_ref_from = NULL; 432 } 433 434 static struct btrfsic_block_link *btrfsic_block_link_alloc(void) 435 { 436 struct btrfsic_block_link *l; 437 438 l = kzalloc(sizeof(*l), GFP_NOFS); 439 if (NULL != l) 440 btrfsic_block_link_init(l); 441 442 return l; 443 } 444 445 static void btrfsic_block_link_free(struct btrfsic_block_link *l) 446 { 447 BUG_ON(!(NULL == l || BTRFSIC_BLOCK_LINK_MAGIC_NUMBER == l->magic_num)); 448 kfree(l); 449 } 450 451 static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds) 452 { 453 ds->magic_num = BTRFSIC_DEV2STATE_MAGIC_NUMBER; 454 ds->bdev = NULL; 455 ds->state = NULL; 456 ds->name[0] = '\0'; 457 INIT_LIST_HEAD(&ds->collision_resolving_node); 458 ds->last_flush_gen = 0; 459 btrfsic_block_init(&ds->dummy_block_for_bio_bh_flush); 460 ds->dummy_block_for_bio_bh_flush.is_iodone = 1; 461 ds->dummy_block_for_bio_bh_flush.dev_state = ds; 462 } 463 464 static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void) 465 { 466 struct btrfsic_dev_state *ds; 467 468 ds = kzalloc(sizeof(*ds), GFP_NOFS); 469 if (NULL != ds) 
470 btrfsic_dev_state_init(ds); 471 472 return ds; 473 } 474 475 static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds) 476 { 477 BUG_ON(!(NULL == ds || 478 BTRFSIC_DEV2STATE_MAGIC_NUMBER == ds->magic_num)); 479 kfree(ds); 480 } 481 482 static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h) 483 { 484 int i; 485 486 for (i = 0; i < BTRFSIC_BLOCK_HASHTABLE_SIZE; i++) 487 INIT_LIST_HEAD(h->table + i); 488 } 489 490 static void btrfsic_block_hashtable_add(struct btrfsic_block *b, 491 struct btrfsic_block_hashtable *h) 492 { 493 const unsigned int hashval = 494 (((unsigned int)(b->dev_bytenr >> 16)) ^ 495 ((unsigned int)((uintptr_t)b->dev_state->bdev))) & 496 (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1); 497 498 list_add(&b->collision_resolving_node, h->table + hashval); 499 } 500 501 static void btrfsic_block_hashtable_remove(struct btrfsic_block *b) 502 { 503 list_del(&b->collision_resolving_node); 504 } 505 506 static struct btrfsic_block *btrfsic_block_hashtable_lookup( 507 struct block_device *bdev, 508 u64 dev_bytenr, 509 struct btrfsic_block_hashtable *h) 510 { 511 const unsigned int hashval = 512 (((unsigned int)(dev_bytenr >> 16)) ^ 513 ((unsigned int)((uintptr_t)bdev))) & 514 (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1); 515 struct btrfsic_block *b; 516 517 list_for_each_entry(b, h->table + hashval, collision_resolving_node) { 518 if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr) 519 return b; 520 } 521 522 return NULL; 523 } 524 525 static void btrfsic_block_link_hashtable_init( 526 struct btrfsic_block_link_hashtable *h) 527 { 528 int i; 529 530 for (i = 0; i < BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE; i++) 531 INIT_LIST_HEAD(h->table + i); 532 } 533 534 static void btrfsic_block_link_hashtable_add( 535 struct btrfsic_block_link *l, 536 struct btrfsic_block_link_hashtable *h) 537 { 538 const unsigned int hashval = 539 (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^ 540 ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^ 541 
((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^ 542 ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev))) 543 & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1); 544 545 BUG_ON(NULL == l->block_ref_to); 546 BUG_ON(NULL == l->block_ref_from); 547 list_add(&l->collision_resolving_node, h->table + hashval); 548 } 549 550 static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l) 551 { 552 list_del(&l->collision_resolving_node); 553 } 554 555 static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup( 556 struct block_device *bdev_ref_to, 557 u64 dev_bytenr_ref_to, 558 struct block_device *bdev_ref_from, 559 u64 dev_bytenr_ref_from, 560 struct btrfsic_block_link_hashtable *h) 561 { 562 const unsigned int hashval = 563 (((unsigned int)(dev_bytenr_ref_to >> 16)) ^ 564 ((unsigned int)(dev_bytenr_ref_from >> 16)) ^ 565 ((unsigned int)((uintptr_t)bdev_ref_to)) ^ 566 ((unsigned int)((uintptr_t)bdev_ref_from))) & 567 (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1); 568 struct btrfsic_block_link *l; 569 570 list_for_each_entry(l, h->table + hashval, collision_resolving_node) { 571 BUG_ON(NULL == l->block_ref_to); 572 BUG_ON(NULL == l->block_ref_from); 573 if (l->block_ref_to->dev_state->bdev == bdev_ref_to && 574 l->block_ref_to->dev_bytenr == dev_bytenr_ref_to && 575 l->block_ref_from->dev_state->bdev == bdev_ref_from && 576 l->block_ref_from->dev_bytenr == dev_bytenr_ref_from) 577 return l; 578 } 579 580 return NULL; 581 } 582 583 static void btrfsic_dev_state_hashtable_init( 584 struct btrfsic_dev_state_hashtable *h) 585 { 586 int i; 587 588 for (i = 0; i < BTRFSIC_DEV2STATE_HASHTABLE_SIZE; i++) 589 INIT_LIST_HEAD(h->table + i); 590 } 591 592 static void btrfsic_dev_state_hashtable_add( 593 struct btrfsic_dev_state *ds, 594 struct btrfsic_dev_state_hashtable *h) 595 { 596 const unsigned int hashval = 597 (((unsigned int)((uintptr_t)ds->bdev->bd_dev)) & 598 (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1)); 599 600 
list_add(&ds->collision_resolving_node, h->table + hashval); 601 } 602 603 static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds) 604 { 605 list_del(&ds->collision_resolving_node); 606 } 607 608 static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev, 609 struct btrfsic_dev_state_hashtable *h) 610 { 611 const unsigned int hashval = 612 dev & (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1); 613 struct btrfsic_dev_state *ds; 614 615 list_for_each_entry(ds, h->table + hashval, collision_resolving_node) { 616 if (ds->bdev->bd_dev == dev) 617 return ds; 618 } 619 620 return NULL; 621 } 622 623 static int btrfsic_process_superblock(struct btrfsic_state *state, 624 struct btrfs_fs_devices *fs_devices) 625 { 626 struct btrfs_super_block *selected_super; 627 struct list_head *dev_head = &fs_devices->devices; 628 struct btrfs_device *device; 629 struct btrfsic_dev_state *selected_dev_state = NULL; 630 int ret = 0; 631 int pass; 632 633 selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS); 634 if (NULL == selected_super) { 635 pr_info("btrfsic: error, kmalloc failed!\n"); 636 return -ENOMEM; 637 } 638 639 list_for_each_entry(device, dev_head, dev_list) { 640 int i; 641 struct btrfsic_dev_state *dev_state; 642 643 if (!device->bdev || !device->name) 644 continue; 645 646 dev_state = btrfsic_dev_state_lookup(device->bdev->bd_dev); 647 BUG_ON(NULL == dev_state); 648 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 649 ret = btrfsic_process_superblock_dev_mirror( 650 state, dev_state, device, i, 651 &selected_dev_state, selected_super); 652 if (0 != ret && 0 == i) { 653 kfree(selected_super); 654 return ret; 655 } 656 } 657 } 658 659 if (NULL == state->latest_superblock) { 660 pr_info("btrfsic: no superblock found!\n"); 661 kfree(selected_super); 662 return -1; 663 } 664 665 state->csum_size = btrfs_super_csum_size(selected_super); 666 667 for (pass = 0; pass < 3; pass++) { 668 int num_copies; 669 int mirror_num; 670 u64 next_bytenr; 671 
672 switch (pass) { 673 case 0: 674 next_bytenr = btrfs_super_root(selected_super); 675 if (state->print_mask & 676 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 677 pr_info("root@%llu\n", next_bytenr); 678 break; 679 case 1: 680 next_bytenr = btrfs_super_chunk_root(selected_super); 681 if (state->print_mask & 682 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 683 pr_info("chunk@%llu\n", next_bytenr); 684 break; 685 case 2: 686 next_bytenr = btrfs_super_log_root(selected_super); 687 if (0 == next_bytenr) 688 continue; 689 if (state->print_mask & 690 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 691 pr_info("log@%llu\n", next_bytenr); 692 break; 693 } 694 695 num_copies = btrfs_num_copies(state->fs_info, next_bytenr, 696 state->metablock_size); 697 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 698 pr_info("num_copies(log_bytenr=%llu) = %d\n", 699 next_bytenr, num_copies); 700 701 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 702 struct btrfsic_block *next_block; 703 struct btrfsic_block_data_ctx tmp_next_block_ctx; 704 struct btrfsic_block_link *l; 705 706 ret = btrfsic_map_block(state, next_bytenr, 707 state->metablock_size, 708 &tmp_next_block_ctx, 709 mirror_num); 710 if (ret) { 711 pr_info("btrfsic: btrfsic_map_block(root @%llu, mirror %d) failed!\n", 712 next_bytenr, mirror_num); 713 kfree(selected_super); 714 return -1; 715 } 716 717 next_block = btrfsic_block_hashtable_lookup( 718 tmp_next_block_ctx.dev->bdev, 719 tmp_next_block_ctx.dev_bytenr, 720 &state->block_hashtable); 721 BUG_ON(NULL == next_block); 722 723 l = btrfsic_block_link_hashtable_lookup( 724 tmp_next_block_ctx.dev->bdev, 725 tmp_next_block_ctx.dev_bytenr, 726 state->latest_superblock->dev_state-> 727 bdev, 728 state->latest_superblock->dev_bytenr, 729 &state->block_link_hashtable); 730 BUG_ON(NULL == l); 731 732 ret = btrfsic_read_block(state, &tmp_next_block_ctx); 733 if (ret < (int)PAGE_SIZE) { 734 pr_info("btrfsic: read @logical %llu failed!\n", 735 
tmp_next_block_ctx.start); 736 btrfsic_release_block_ctx(&tmp_next_block_ctx); 737 kfree(selected_super); 738 return -1; 739 } 740 741 ret = btrfsic_process_metablock(state, 742 next_block, 743 &tmp_next_block_ctx, 744 BTRFS_MAX_LEVEL + 3, 1); 745 btrfsic_release_block_ctx(&tmp_next_block_ctx); 746 } 747 } 748 749 kfree(selected_super); 750 return ret; 751 } 752 753 static int btrfsic_process_superblock_dev_mirror( 754 struct btrfsic_state *state, 755 struct btrfsic_dev_state *dev_state, 756 struct btrfs_device *device, 757 int superblock_mirror_num, 758 struct btrfsic_dev_state **selected_dev_state, 759 struct btrfs_super_block *selected_super) 760 { 761 struct btrfs_fs_info *fs_info = state->fs_info; 762 struct btrfs_super_block *super_tmp; 763 u64 dev_bytenr; 764 struct btrfsic_block *superblock_tmp; 765 int pass; 766 struct block_device *const superblock_bdev = device->bdev; 767 struct page *page; 768 struct address_space *mapping = superblock_bdev->bd_inode->i_mapping; 769 int ret = 0; 770 771 /* super block bytenr is always the unmapped device bytenr */ 772 dev_bytenr = btrfs_sb_offset(superblock_mirror_num); 773 if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->commit_total_bytes) 774 return -1; 775 776 page = read_cache_page_gfp(mapping, dev_bytenr >> PAGE_SHIFT, GFP_NOFS); 777 if (IS_ERR(page)) 778 return -1; 779 780 super_tmp = page_address(page); 781 782 if (btrfs_super_bytenr(super_tmp) != dev_bytenr || 783 btrfs_super_magic(super_tmp) != BTRFS_MAGIC || 784 memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) || 785 btrfs_super_nodesize(super_tmp) != state->metablock_size || 786 btrfs_super_sectorsize(super_tmp) != state->datablock_size) { 787 ret = 0; 788 goto out; 789 } 790 791 superblock_tmp = 792 btrfsic_block_hashtable_lookup(superblock_bdev, 793 dev_bytenr, 794 &state->block_hashtable); 795 if (NULL == superblock_tmp) { 796 superblock_tmp = btrfsic_block_alloc(); 797 if (NULL == superblock_tmp) { 798 pr_info("btrfsic: error, kmalloc 
failed!\n"); 799 ret = -1; 800 goto out; 801 } 802 /* for superblock, only the dev_bytenr makes sense */ 803 superblock_tmp->dev_bytenr = dev_bytenr; 804 superblock_tmp->dev_state = dev_state; 805 superblock_tmp->logical_bytenr = dev_bytenr; 806 superblock_tmp->generation = btrfs_super_generation(super_tmp); 807 superblock_tmp->is_metadata = 1; 808 superblock_tmp->is_superblock = 1; 809 superblock_tmp->is_iodone = 1; 810 superblock_tmp->never_written = 0; 811 superblock_tmp->mirror_num = 1 + superblock_mirror_num; 812 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 813 btrfs_info_in_rcu(fs_info, 814 "new initial S-block (bdev %p, %s) @%llu (%s/%llu/%d)", 815 superblock_bdev, 816 rcu_str_deref(device->name), dev_bytenr, 817 dev_state->name, dev_bytenr, 818 superblock_mirror_num); 819 list_add(&superblock_tmp->all_blocks_node, 820 &state->all_blocks_list); 821 btrfsic_block_hashtable_add(superblock_tmp, 822 &state->block_hashtable); 823 } 824 825 /* select the one with the highest generation field */ 826 if (btrfs_super_generation(super_tmp) > 827 state->max_superblock_generation || 828 0 == state->max_superblock_generation) { 829 memcpy(selected_super, super_tmp, sizeof(*selected_super)); 830 *selected_dev_state = dev_state; 831 state->max_superblock_generation = 832 btrfs_super_generation(super_tmp); 833 state->latest_superblock = superblock_tmp; 834 } 835 836 for (pass = 0; pass < 3; pass++) { 837 u64 next_bytenr; 838 int num_copies; 839 int mirror_num; 840 const char *additional_string = NULL; 841 struct btrfs_disk_key tmp_disk_key; 842 843 tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY; 844 tmp_disk_key.offset = 0; 845 switch (pass) { 846 case 0: 847 btrfs_set_disk_key_objectid(&tmp_disk_key, 848 BTRFS_ROOT_TREE_OBJECTID); 849 additional_string = "initial root "; 850 next_bytenr = btrfs_super_root(super_tmp); 851 break; 852 case 1: 853 btrfs_set_disk_key_objectid(&tmp_disk_key, 854 BTRFS_CHUNK_TREE_OBJECTID); 855 additional_string = "initial chunk "; 856 
			next_bytenr = btrfs_super_chunk_root(super_tmp);
			break;
		case 2:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_TREE_LOG_OBJECTID);
			additional_string = "initial log ";
			next_bytenr = btrfs_super_log_root(super_tmp);
			if (0 == next_bytenr)
				continue;
			break;
		}

		num_copies = btrfs_num_copies(fs_info, next_bytenr,
					      state->metablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			pr_info("num_copies(log_bytenr=%llu) = %d\n",
				next_bytenr, num_copies);
		/* register every mirror of this tree root as referenced */
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block *next_block;
			struct btrfsic_block_data_ctx tmp_next_block_ctx;
			struct btrfsic_block_link *l;

			if (btrfsic_map_block(state, next_bytenr,
					      state->metablock_size,
					      &tmp_next_block_ctx,
					      mirror_num)) {
				pr_info("btrfsic: btrfsic_map_block(bytenr @%llu, mirror %d) failed!\n",
					next_bytenr, mirror_num);
				ret = -1;
				goto out;
			}

			next_block = btrfsic_block_lookup_or_add(
					state, &tmp_next_block_ctx,
					additional_string, 1, 1, 0,
					mirror_num, NULL);
			if (NULL == next_block) {
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				ret = -1;
				goto out;
			}

			next_block->disk_key = tmp_disk_key;
			next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
			l = btrfsic_block_link_lookup_or_add(
					state, &tmp_next_block_ctx,
					next_block, superblock_tmp,
					BTRFSIC_GENERATION_UNKNOWN);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
			if (NULL == l) {
				ret = -1;
				goto out;
			}
		}
	}
	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES)
		btrfsic_dump_tree_sub(state, superblock_tmp, 0);

out:
	put_page(page);
	return ret;
}

/*
 * Allocate a zeroed stack frame for the iterative tree walk in
 * btrfsic_process_metablock() and stamp it with the magic number that
 * btrfsic_stack_frame_free() later sanity-checks.
 *
 * Returns the new frame or NULL on allocation failure.
 */
static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void)
{
	struct btrfsic_stack_frame *sf;

	sf = kzalloc(sizeof(*sf), GFP_NOFS);
	if (NULL == sf)
		pr_info("btrfsic: alloc memory failed!\n");
	else
		sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
	return sf;
}

/*
 * Free a stack frame previously obtained from btrfsic_stack_frame_alloc().
 * NULL is accepted; a non-NULL frame must still carry the magic number,
 * which catches double-frees and stray pointers.
 */
static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf)
{
	BUG_ON(!(NULL == sf ||
		 BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER == sf->magic));
	kfree(sf);
}

/*
 * Walk a metadata tree block (leaf or node) and record a link object for
 * every block it references, descending into lower levels of the tree up
 * to first_limit_nesting levels deep.
 *
 * The descent is implemented iteratively with an explicit stack of
 * btrfsic_stack_frame objects instead of recursion, to bound kernel stack
 * usage: the first frame lives on the stack, deeper frames are allocated
 * with btrfsic_stack_frame_alloc().  Each frame iterates over the items
 * (leaf) or key pointers (node) of one tree block, and over all mirrors
 * of each referenced block (sf->num_copies / sf->mirror_num).
 *
 * Returns 0 on success, -1 if a referenced block could not be mapped,
 * read or registered.
 */
static noinline_for_stack int btrfsic_process_metablock(
		struct btrfsic_state *state,
		struct btrfsic_block *const first_block,
		struct btrfsic_block_data_ctx *const first_block_ctx,
		int first_limit_nesting, int force_iodone_flag)
{
	struct btrfsic_stack_frame initial_stack_frame = { 0 };
	struct btrfsic_stack_frame *sf;
	struct btrfsic_stack_frame *next_stack;
	struct btrfs_header *const first_hdr =
		(struct btrfs_header *)first_block_ctx->datav[0];

	BUG_ON(!first_hdr);
	sf = &initial_stack_frame;
	sf->error = 0;
	sf->i = -1;	/* -1 means "header not yet examined" */
	sf->limit_nesting = first_limit_nesting;
	sf->block = first_block;
	sf->block_ctx = first_block_ctx;
	sf->next_block = NULL;
	sf->hdr = first_hdr;
	sf->prev = NULL;

continue_with_new_stack_frame:
	sf->block->generation = le64_to_cpu(sf->hdr->generation);
	if (0 == sf->hdr->level) {
		/* level 0: a leaf, iterate over its items */
		struct btrfs_leaf *const leafhdr =
		    (struct btrfs_leaf *)sf->hdr;

		if (-1 == sf->i) {
			sf->nr = btrfs_stack_header_nritems(&leafhdr->header);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				pr_info("leaf %llu items %d generation %llu owner %llu\n",
					sf->block_ctx->start, sf->nr,
					btrfs_stack_header_generation(
						&leafhdr->header),
					btrfs_stack_header_owner(
						&leafhdr->header));
		}

continue_with_current_leaf_stack_frame:
		/* advance to the next item once all mirrors are done */
		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
			sf->i++;
			sf->num_copies = 0;
		}

		if (sf->i < sf->nr) {
			struct btrfs_item disk_item;
			u32 disk_item_offset =
			    (uintptr_t)(leafhdr->items + sf->i) -
			    (uintptr_t)leafhdr;
			struct btrfs_disk_key *disk_key;
			u8 type;
			u32 item_offset;
			u32 item_size;

			if (disk_item_offset + sizeof(struct btrfs_item) >
			    sf->block_ctx->len) {
leaf_item_out_of_bounce_error:
				pr_info("btrfsic: leaf item out of bounce at logical %llu, dev %s\n",
					sf->block_ctx->start,
					sf->block_ctx->dev->name);
				goto one_stack_frame_backwards;
			}
			btrfsic_read_from_block_data(sf->block_ctx,
						     &disk_item,
						     disk_item_offset,
						     sizeof(struct btrfs_item));
			item_offset = btrfs_stack_item_offset(&disk_item);
			item_size = btrfs_stack_item_size(&disk_item);
			disk_key = &disk_item.key;
			type = btrfs_disk_key_type(disk_key);

			if (BTRFS_ROOT_ITEM_KEY == type) {
				/* a root item references another tree: descend */
				struct btrfs_root_item root_item;
				u32 root_item_offset;
				u64 next_bytenr;

				root_item_offset = item_offset +
					offsetof(struct btrfs_leaf, items);
				if (root_item_offset + item_size >
				    sf->block_ctx->len)
					goto leaf_item_out_of_bounce_error;
				btrfsic_read_from_block_data(
					sf->block_ctx, &root_item,
					root_item_offset,
					item_size);
				next_bytenr = btrfs_root_bytenr(&root_item);

				sf->error =
				    btrfsic_create_link_to_next_block(
						state,
						sf->block,
						sf->block_ctx,
						next_bytenr,
						sf->limit_nesting,
						&sf->next_block_ctx,
						&sf->next_block,
						force_iodone_flag,
						&sf->num_copies,
						&sf->mirror_num,
						disk_key,
						btrfs_root_generation(
						&root_item));
				if (sf->error)
					goto one_stack_frame_backwards;

				if (NULL != sf->next_block) {
					/* child block was read: push a frame */
					struct btrfs_header *const next_hdr =
					    (struct btrfs_header *)
					    sf->next_block_ctx.datav[0];

					next_stack =
					    btrfsic_stack_frame_alloc();
					if (NULL == next_stack) {
						sf->error = -1;
						btrfsic_release_block_ctx(
								&sf->
								next_block_ctx);
						goto one_stack_frame_backwards;
					}

					next_stack->i = -1;
					next_stack->block = sf->next_block;
					next_stack->block_ctx =
					    &sf->next_block_ctx;
					next_stack->next_block = NULL;
					next_stack->hdr = next_hdr;
					next_stack->limit_nesting =
					    sf->limit_nesting - 1;
					next_stack->prev = sf;
					sf = next_stack;
					goto continue_with_new_stack_frame;
				}
			} else if (BTRFS_EXTENT_DATA_KEY == type &&
				   state->include_extent_data) {
				sf->error = btrfsic_handle_extent_data(
						state,
						sf->block,
						sf->block_ctx,
						item_offset,
						force_iodone_flag);
				if (sf->error)
					goto one_stack_frame_backwards;
			}

			goto continue_with_current_leaf_stack_frame;
		}
	} else {
		/* an interior node: iterate over its key pointers */
		struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr;

		if (-1 == sf->i) {
			sf->nr = btrfs_stack_header_nritems(&nodehdr->header);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				pr_info("node %llu level %d items %d generation %llu owner %llu\n",
					sf->block_ctx->start,
					nodehdr->header.level, sf->nr,
					btrfs_stack_header_generation(
						&nodehdr->header),
					btrfs_stack_header_owner(
						&nodehdr->header));
		}

continue_with_current_node_stack_frame:
		/* advance to the next key pointer once all mirrors are done */
		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
			sf->i++;
			sf->num_copies = 0;
		}

		if (sf->i < sf->nr) {
			struct btrfs_key_ptr key_ptr;
			u32 key_ptr_offset;
			u64 next_bytenr;

			key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) -
					 (uintptr_t)nodehdr;
			if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
			    sf->block_ctx->len) {
				pr_info("btrfsic: node item out of bounce at logical %llu, dev %s\n",
					sf->block_ctx->start,
					sf->block_ctx->dev->name);
				goto one_stack_frame_backwards;
			}
			btrfsic_read_from_block_data(
				sf->block_ctx, &key_ptr, key_ptr_offset,
				sizeof(struct btrfs_key_ptr));
			next_bytenr = btrfs_stack_key_blockptr(&key_ptr);

			sf->error = btrfsic_create_link_to_next_block(
					state,
					sf->block,
					sf->block_ctx,
					next_bytenr,
					sf->limit_nesting,
					&sf->next_block_ctx,
					&sf->next_block,
					force_iodone_flag,
					&sf->num_copies,
					&sf->mirror_num,
					&key_ptr.key,
					btrfs_stack_key_generation(&key_ptr));
			if (sf->error)
				goto one_stack_frame_backwards;

			if (NULL != sf->next_block) {
				/* child block was read: push a frame */
				struct btrfs_header *const next_hdr =
				    (struct btrfs_header *)
				    sf->next_block_ctx.datav[0];

				next_stack = btrfsic_stack_frame_alloc();
				if (NULL == next_stack) {
					sf->error = -1;
					goto one_stack_frame_backwards;
				}

				next_stack->i = -1;
				next_stack->block = sf->next_block;
				next_stack->block_ctx = &sf->next_block_ctx;
				next_stack->next_block = NULL;
				next_stack->hdr = next_hdr;
				next_stack->limit_nesting =
				    sf->limit_nesting - 1;
				next_stack->prev = sf;
				sf = next_stack;
				goto continue_with_new_stack_frame;
			}

			goto continue_with_current_node_stack_frame;
		}
	}

one_stack_frame_backwards:
	/* pop a frame; errors propagate into the parent frame */
	if (NULL != sf->prev) {
		struct btrfsic_stack_frame *const prev = sf->prev;

		/* the one for the initial block is freed in the caller */
		btrfsic_release_block_ctx(sf->block_ctx);

		if (sf->error) {
			prev->error = sf->error;
			btrfsic_stack_frame_free(sf);
			sf = prev;
			goto one_stack_frame_backwards;
		}

		btrfsic_stack_frame_free(sf);
		sf = prev;
		goto continue_with_new_stack_frame;
	} else {
		BUG_ON(&initial_stack_frame != sf);
	}

	return sf->error;
}

/*
 * Copy len bytes starting at byte offset 'offset' of a block into dstv,
 * crossing the page boundaries of the block_ctx->datav[] page array as
 * needed.  The source pages were mapped by btrfsic_read_block().
 */
static void btrfsic_read_from_block_data(
	struct btrfsic_block_data_ctx *block_ctx,
	void *dstv, u32 offset, size_t len)
{
	size_t cur;
	size_t pgoff;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = offset_in_page(block_ctx->start);
	unsigned long i = (start_offset + offset) >> PAGE_SHIFT;

	WARN_ON(offset + len > block_ctx->len);
	pgoff = offset_in_page(start_offset + offset);

	while (len > 0) {
		/* copy at most up to the end of the current page */
		cur = min(len, ((size_t)PAGE_SIZE - pgoff));
		BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE));
		kaddr = block_ctx->datav[i];
		memcpy(dst, kaddr + pgoff, cur);

		dst += cur;
		len -= cur;
		pgoff = 0;
		i++;
	}
}

/*
 * Register (or refresh) the link from 'block' to the metadata block at
 * logical address next_bytenr for one mirror, and read that child block
 * if the walk is allowed to descend further.
 *
 * On the first call for a given reference, *num_copiesp is 0 and the
 * mirror count is initialized; each call handles mirror *mirror_nump and
 * increments it, so the caller loops until *mirror_nump > *num_copiesp.
 *
 * *next_blockp is set to the child block when its contents were read
 * (limit_nesting > 0 and the link is new), so the caller should push a
 * stack frame and descend; otherwise it is set to NULL.
 *
 * Returns 0 on success (including "nothing left to do"), -1 on failure.
 */
static int btrfsic_create_link_to_next_block(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u64 next_bytenr,
		int limit_nesting,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block **next_blockp,
		int force_iodone_flag,
		int *num_copiesp, int *mirror_nump,
		struct btrfs_disk_key *disk_key,
		u64 parent_generation)
{
	struct btrfs_fs_info *fs_info = state->fs_info;
	struct btrfsic_block *next_block = NULL;
	int ret;
	struct btrfsic_block_link *l;
	int did_alloc_block_link;
	int block_was_created;

	*next_blockp = NULL;
	if (0 == *num_copiesp) {
		*num_copiesp = btrfs_num_copies(fs_info, next_bytenr,
						state->metablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			pr_info("num_copies(log_bytenr=%llu) = %d\n",
				next_bytenr, *num_copiesp);
		*mirror_nump = 1;
	}

	if (*mirror_nump > *num_copiesp)
		return 0;

	if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
		pr_info("btrfsic_create_link_to_next_block(mirror_num=%d)\n",
			*mirror_nump);
	ret = btrfsic_map_block(state, next_bytenr,
				state->metablock_size,
				next_block_ctx, *mirror_nump);
	if (ret) {
		pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
			next_bytenr, *mirror_nump);
		btrfsic_release_block_ctx(next_block_ctx);
		*next_blockp = NULL;
		return -1;
	}

	next_block = btrfsic_block_lookup_or_add(state,
						 next_block_ctx, "referenced ",
						 1, force_iodone_flag,
						 !force_iodone_flag,
						 *mirror_nump,
						 &block_was_created);
	if (NULL == next_block) {
		btrfsic_release_block_ctx(next_block_ctx);
		*next_blockp = NULL;
		return -1;
	}
	if (block_was_created) {
		l = NULL;
		next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
	} else {
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) {
			if (next_block->logical_bytenr != next_bytenr &&
			    !(!next_block->is_metadata &&
			      0 == next_block->logical_bytenr))
				pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
					next_bytenr, next_block_ctx->dev->name,
					next_block_ctx->dev_bytenr, *mirror_nump,
					btrfsic_get_block_type(state,
							       next_block),
					next_block->logical_bytenr);
			else
				pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, %c.\n",
					next_bytenr, next_block_ctx->dev->name,
					next_block_ctx->dev_bytenr, *mirror_nump,
					btrfsic_get_block_type(state,
							       next_block));
		}
		next_block->logical_bytenr = next_bytenr;

		next_block->mirror_num = *mirror_nump;
		l = btrfsic_block_link_hashtable_lookup(
				next_block_ctx->dev->bdev,
				next_block_ctx->dev_bytenr,
				block_ctx->dev->bdev,
				block_ctx->dev_bytenr,
				&state->block_link_hashtable);
	}

	next_block->disk_key = *disk_key;
	if (NULL == l) {
		/* first time this parent->child edge is seen */
		l = btrfsic_block_link_alloc();
		if (NULL == l) {
			pr_info("btrfsic: error, kmalloc failed!\n");
			btrfsic_release_block_ctx(next_block_ctx);
			*next_blockp = NULL;
			return -1;
		}

		did_alloc_block_link = 1;
		l->block_ref_to = next_block;
		l->block_ref_from = block;
		l->ref_cnt = 1;
		l->parent_generation = parent_generation;

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			btrfsic_print_add_link(state, l);

		list_add(&l->node_ref_to, &block->ref_to_list);
		list_add(&l->node_ref_from, &next_block->ref_from_list);

		btrfsic_block_link_hashtable_add(l,
						 &state->block_link_hashtable);
	} else {
		did_alloc_block_link = 0;
		if (0 == limit_nesting) {
			l->ref_cnt++;
			l->parent_generation = parent_generation;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_add_link(state, l);
		}
	}

	if (limit_nesting > 0 && did_alloc_block_link) {
		/* new edge and still allowed to descend: read the child */
		ret = btrfsic_read_block(state, next_block_ctx);
		if (ret < (int)next_block_ctx->len) {
			pr_info("btrfsic: read block @logical %llu failed!\n",
				next_bytenr);
			btrfsic_release_block_ctx(next_block_ctx);
			*next_blockp = NULL;
			return -1;
		}

		*next_blockp = next_block;
	} else {
		*next_blockp = NULL;
	}
	(*mirror_nump)++;

	return 0;
}

/*
 * Process one EXTENT_DATA item of a leaf: register links from the leaf's
 * block to every data block (and every mirror of it) that the extent
 * item references.  Only called when the check_int_data mode is enabled
 * (state->include_extent_data).
 *
 * Returns 0 on success (inline or unwritten extents are skipped as
 * success), -1 on failure.
 */
static int btrfsic_handle_extent_data(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u32 item_offset, int force_iodone_flag)
{
	struct btrfs_fs_info *fs_info = state->fs_info;
	struct btrfs_file_extent_item file_extent_item;
	u64 file_extent_item_offset;
	u64 next_bytenr;
	u64 num_bytes;
	u64 generation;
	struct btrfsic_block_link *l;
	int ret;

	file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
				  item_offset;
	if (file_extent_item_offset +
	    offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
	    block_ctx->len) {
		pr_info("btrfsic: file item out of bounce at logical %llu, dev %s\n",
			block_ctx->start, block_ctx->dev->name);
		return -1;
	}

	/* first read only up to disk_num_bytes to inspect the type */
	btrfsic_read_from_block_data(block_ctx, &file_extent_item,
		file_extent_item_offset,
		offsetof(struct btrfs_file_extent_item, disk_num_bytes));
	if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
	    btrfs_stack_file_extent_disk_bytenr(&file_extent_item) == 0) {
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
			pr_info("extent_data: type %u, disk_bytenr = %llu\n",
				file_extent_item.type,
				btrfs_stack_file_extent_disk_bytenr(
				&file_extent_item));
		return 0;
	}

	if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
	    block_ctx->len) {
		pr_info("btrfsic: file item out of bounce at logical %llu, dev %s\n",
			block_ctx->start, block_ctx->dev->name);
		return -1;
	}
	btrfsic_read_from_block_data(block_ctx, &file_extent_item,
				     file_extent_item_offset,
				     sizeof(struct btrfs_file_extent_item));
	next_bytenr = btrfs_stack_file_extent_disk_bytenr(&file_extent_item);
	if (btrfs_stack_file_extent_compression(&file_extent_item) ==
	    BTRFS_COMPRESS_NONE) {
		next_bytenr += btrfs_stack_file_extent_offset(&file_extent_item);
		num_bytes = btrfs_stack_file_extent_num_bytes(&file_extent_item);
	} else {
		/* compressed: the whole on-disk extent is referenced */
		num_bytes = btrfs_stack_file_extent_disk_num_bytes(&file_extent_item);
	}
	generation = btrfs_stack_file_extent_generation(&file_extent_item);

	if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
		pr_info("extent_data: type %u, disk_bytenr = %llu, offset = %llu, num_bytes = %llu\n",
			file_extent_item.type,
			btrfs_stack_file_extent_disk_bytenr(&file_extent_item),
			btrfs_stack_file_extent_offset(&file_extent_item),
			num_bytes);
	/* walk the extent in datablock_size chunks */
	while (num_bytes > 0) {
		u32 chunk_len;
		int num_copies;
		int mirror_num;

		if (num_bytes > state->datablock_size)
			chunk_len = state->datablock_size;
		else
			chunk_len = num_bytes;

		num_copies = btrfs_num_copies(fs_info, next_bytenr,
					      state->datablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			pr_info("num_copies(log_bytenr=%llu) = %d\n",
				next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block_data_ctx next_block_ctx;
			struct btrfsic_block *next_block;
			int block_was_created;

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				pr_info("btrfsic_handle_extent_data(mirror_num=%d)\n",
					mirror_num);
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
				pr_info("\tdisk_bytenr = %llu, num_bytes %u\n",
					next_bytenr, chunk_len);
			ret = btrfsic_map_block(state, next_bytenr,
						chunk_len, &next_block_ctx,
						mirror_num);
			if (ret) {
				pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
					next_bytenr, mirror_num);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state,
					&next_block_ctx,
					"referenced ",
					0,
					force_iodone_flag,
					!force_iodone_flag,
					mirror_num,
					&block_was_created);
			if (NULL == next_block) {
				pr_info("btrfsic: error, kmalloc failed!\n");
				btrfsic_release_block_ctx(&next_block_ctx);
				return -1;
			}
			if (!block_was_created) {
				if ((state->print_mask &
				     BTRFSIC_PRINT_MASK_VERBOSE) &&
				    next_block->logical_bytenr != next_bytenr &&
				    !(!next_block->is_metadata &&
				      0 == next_block->logical_bytenr)) {
					pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, D, bytenr mismatch (!= stored %llu).\n",
						next_bytenr,
						next_block_ctx.dev->name,
						next_block_ctx.dev_bytenr,
						mirror_num,
						next_block->logical_bytenr);
				}
				next_block->logical_bytenr = next_bytenr;
				next_block->mirror_num = mirror_num;
			}

			l = btrfsic_block_link_lookup_or_add(state,
							     &next_block_ctx,
							     next_block, block,
							     generation);
			btrfsic_release_block_ctx(&next_block_ctx);
			if (NULL == l)
				return -1;
		}

		next_bytenr += chunk_len;
		num_bytes -= chunk_len;
	}

	return 0;
}

/*
 * Map a logical btrfs address to one physical location (mirror_num) and
 * fill in block_ctx_out with the device state, device byte offset and
 * length.  No data is read; pagev/datav are left NULL for
 * btrfsic_read_block() to populate.
 *
 * Returns 0 on success, the btrfs_map_block() error code on mapping
 * failure, or -ENXIO if the device is missing or has no registered
 * btrfsic dev state.
 */
static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
			     struct btrfsic_block_data_ctx *block_ctx_out,
			     int mirror_num)
{
	struct btrfs_fs_info *fs_info = state->fs_info;
	int ret;
	u64 length;
	struct btrfs_bio *multi = NULL;
	struct btrfs_device *device;

	length = len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
			      bytenr, &length, &multi, mirror_num);

	if (ret) {
		/* leave the context in a safe, empty state */
		block_ctx_out->start = 0;
		block_ctx_out->dev_bytenr = 0;
		block_ctx_out->len = 0;
		block_ctx_out->dev = NULL;
		block_ctx_out->datav = NULL;
		block_ctx_out->pagev = NULL;
		block_ctx_out->mem_to_free = NULL;

		return ret;
	}

	device = multi->stripes[0].dev;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) ||
	    !device->bdev || !device->name)
		block_ctx_out->dev = NULL;
	else
		block_ctx_out->dev = btrfsic_dev_state_lookup(
							device->bdev->bd_dev);
	block_ctx_out->dev_bytenr = multi->stripes[0].physical;
	block_ctx_out->start = bytenr;
	block_ctx_out->len = len;
	block_ctx_out->datav = NULL;
	block_ctx_out->pagev = NULL;
	block_ctx_out->mem_to_free = NULL;

	kfree(multi);
	if (NULL == block_ctx_out->dev) {
		ret = -ENXIO;
		pr_info("btrfsic: error, cannot lookup dev (#1)!\n");
	}

	return ret;
}

/*
 * Undo btrfsic_read_block(): unmap and free all pages of the block and
 * release the single allocation that backs both the datav and pagev
 * arrays.  Safe to call on a context that was never read (mem_to_free
 * NULL) or whose read failed part-way (NULL entries are skipped).
 */
static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
{
	if (block_ctx->mem_to_free) {
		unsigned int num_pages;

		BUG_ON(!block_ctx->datav);
		BUG_ON(!block_ctx->pagev);
		num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
			    PAGE_SHIFT;
		while (num_pages > 0) {
			num_pages--;
			if (block_ctx->datav[num_pages]) {
				kunmap(block_ctx->pagev[num_pages]);
				block_ctx->datav[num_pages] = NULL;
			}
			if (block_ctx->pagev[num_pages]) {
				__free_page(block_ctx->pagev[num_pages]);
				block_ctx->pagev[num_pages] = NULL;
			}
		}

		kfree(block_ctx->mem_to_free);
		block_ctx->mem_to_free = NULL;
		block_ctx->pagev = NULL;
		block_ctx->datav = NULL;
	}
}

/*
 * Read the block described by block_ctx (dev + dev_bytenr + len) into
 * freshly allocated, kmap'ed pages.  One kcalloc'ed chunk backs both the
 * datav and pagev arrays and is recorded in mem_to_free; the caller must
 * release everything with btrfsic_release_block_ctx(), including after a
 * partial failure.
 *
 * Returns block_ctx->len on success, -ENOMEM or -1 on failure.
 */
static int btrfsic_read_block(struct btrfsic_state *state,
			      struct btrfsic_block_data_ctx *block_ctx)
{
	unsigned int num_pages;
	unsigned int i;
	size_t size;
	u64 dev_bytenr;
	int ret;

	BUG_ON(block_ctx->datav);
	BUG_ON(block_ctx->pagev);
	BUG_ON(block_ctx->mem_to_free);
	if (!PAGE_ALIGNED(block_ctx->dev_bytenr)) {
		pr_info("btrfsic: read_block() with unaligned bytenr %llu\n",
			block_ctx->dev_bytenr);
		return -1;
	}

	num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
		    PAGE_SHIFT;
	size = sizeof(*block_ctx->datav) + sizeof(*block_ctx->pagev);
	block_ctx->mem_to_free = kcalloc(num_pages, size, GFP_NOFS);
	if (!block_ctx->mem_to_free)
		return -ENOMEM;
	block_ctx->datav = block_ctx->mem_to_free;
	block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
	for (i = 0; i < num_pages; i++) {
		block_ctx->pagev[i] = alloc_page(GFP_NOFS);
		if (!block_ctx->pagev[i])
			return -1;
	}

	dev_bytenr = block_ctx->dev_bytenr;
	/* may need several bios if bio_add_page() stops accepting pages */
	for (i = 0; i < num_pages;) {
		struct bio *bio;
		unsigned int j;

		bio = btrfs_io_bio_alloc(num_pages - i);
		bio_set_dev(bio, block_ctx->dev->bdev);
		bio->bi_iter.bi_sector = dev_bytenr >> 9;
		bio->bi_opf = REQ_OP_READ;

		for (j = i; j < num_pages; j++) {
			ret = bio_add_page(bio, block_ctx->pagev[j],
					   PAGE_SIZE, 0);
			if (PAGE_SIZE != ret)
				break;
		}
		if (j == i) {
			pr_info("btrfsic: error, failed to add a single page!\n");
			return -1;
		}
		if (submit_bio_wait(bio)) {
			pr_info("btrfsic: read error at logical %llu dev %s!\n",
				block_ctx->start, block_ctx->dev->name);
			bio_put(bio);
			return -1;
		}
		bio_put(bio);
		dev_bytenr += (j - i) * PAGE_SIZE;
		i = j;
	}
	for (i = 0; i < num_pages; i++)
		block_ctx->datav[i] = kmap(block_ctx->pagev[i]);

	return block_ctx->len;
}

/*
 * Dump the complete in-memory block database to the kernel log: every
 * known block with all of its outgoing (ref_to) and incoming (ref_from)
 * links.  Debug aid only.
 */
static void btrfsic_dump_database(struct btrfsic_state *state)
{
	const struct btrfsic_block *b_all;

	BUG_ON(NULL == state);

	pr_info("all_blocks_list:\n");
	list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
		const struct btrfsic_block_link *l;

		pr_info("%c-block @%llu (%s/%llu/%d)\n",
			btrfsic_get_block_type(state, b_all),
			b_all->logical_bytenr, b_all->dev_state->name,
			b_all->dev_bytenr, b_all->mirror_num);

		list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
			pr_info(" %c @%llu (%s/%llu/%d) refers %u* to %c @%llu (%s/%llu/%d)\n",
				btrfsic_get_block_type(state, b_all),
				b_all->logical_bytenr, b_all->dev_state->name,
				b_all->dev_bytenr, b_all->mirror_num,
				l->ref_cnt,
				btrfsic_get_block_type(state, l->block_ref_to),
				l->block_ref_to->logical_bytenr,
				l->block_ref_to->dev_state->name,
				l->block_ref_to->dev_bytenr,
				l->block_ref_to->mirror_num);
		}

		list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
			pr_info(" %c @%llu (%s/%llu/%d) is ref %u* from %c @%llu (%s/%llu/%d)\n",
				btrfsic_get_block_type(state, b_all),
				b_all->logical_bytenr, b_all->dev_state->name,
				b_all->dev_bytenr, b_all->mirror_num,
				l->ref_cnt,
				btrfsic_get_block_type(state, l->block_ref_from),
				l->block_ref_from->logical_bytenr,
				l->block_ref_from->dev_state->name,
				l->block_ref_from->dev_bytenr,
				l->block_ref_from->mirror_num);
		}

		pr_info("\n");
	}
}

/*
 * Test whether the disk block contains a tree block (leaf or node)
 * (note that this test fails for the super block)
 *
 * Decides by comparing the embedded fsid and verifying the header
 * checksum over the first metablock_size bytes (skipping the checksum
 * field itself).  Returns 0 if the data looks like metadata, 1 if not.
 */
static noinline_for_stack int btrfsic_test_for_metadata(
		struct btrfsic_state *state,
		char **datav, unsigned int num_pages)
{
	struct btrfs_fs_info *fs_info = state->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	struct btrfs_header *h;
	u8 csum[BTRFS_CSUM_SIZE];
	unsigned int i;

	if (num_pages * PAGE_SIZE < state->metablock_size)
		return 1;	/* not metadata */
	num_pages = state->metablock_size >> PAGE_SHIFT;
	h = (struct btrfs_header *)datav[0];

	if (memcmp(h->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE))
		return 1;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);

	for (i = 0; i < num_pages; i++) {
		/* the first page skips the stored checksum bytes */
		u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
		size_t sublen = i ? PAGE_SIZE :
				    (PAGE_SIZE - BTRFS_CSUM_SIZE);

		crypto_shash_update(shash, data, sublen);
	}
	crypto_shash_final(shash, csum);
	if (memcmp(csum, h->csum, state->csum_size))
		return 1;

	return 0;	/* is metadata */
}

/*
 * Examine one write request (possibly spanning several blocks) before it
 * goes to disk.  For each block covered by mapped_datav:
 *  - classify it as superblock / metadata / data,
 *  - warn if it overwrites a block still referenced by the most recent
 *    superblock or whose previous write has not completed yet,
 *  - drop the block's old outgoing links and re-process its new content,
 *  - patch the bio's end_io so completion is tracked per block
 *    (chaining multiple blocks of the same bio via next_in_same_bio).
 *
 * A NULL bio means the data is already considered written (is_iodone=1).
 * The function loops via 'again' until all num_pages are consumed.
 */
static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
					  u64 dev_bytenr, char **mapped_datav,
					  unsigned int num_pages,
					  struct bio *bio, int *bio_is_patched,
					  int submit_bio_bh_rw)
{
	int is_metadata;
	struct btrfsic_block *block;
	struct btrfsic_block_data_ctx block_ctx;
	int ret;
	struct btrfsic_state *state = dev_state->state;
	struct block_device *bdev = dev_state->bdev;
	unsigned int processed_len;

	if (NULL != bio_is_patched)
		*bio_is_patched = 0;

again:
	if (num_pages == 0)
		return;

	processed_len = 0;
	is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav,
						      num_pages));

	block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
					       &state->block_hashtable);
	if (NULL != block) {
		/* the target location is already known to the checker */
		u64 bytenr = 0;
		struct btrfsic_block_link *l, *tmp;

		if (block->is_superblock) {
			bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
						    mapped_datav[0]);
			if (num_pages * PAGE_SIZE <
			    BTRFS_SUPER_INFO_SIZE) {
				pr_info("btrfsic: cannot work with too short bios!\n");
				return;
			}
			is_metadata = 1;
			BUG_ON(!PAGE_ALIGNED(BTRFS_SUPER_INFO_SIZE));
			processed_len = BTRFS_SUPER_INFO_SIZE;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
				pr_info("[before new superblock is written]:\n");
				btrfsic_dump_tree_sub(state, block, 0);
			}
		}
		if (is_metadata) {
			if (!block->is_superblock) {
				if (num_pages * PAGE_SIZE <
				    state->metablock_size) {
					pr_info("btrfsic: cannot work with too short bios!\n");
					return;
				}
				processed_len = state->metablock_size;
				bytenr = btrfs_stack_header_bytenr(
						(struct btrfs_header *)
						mapped_datav[0]);
				btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
							       dev_state,
							       dev_bytenr);
			}
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) {
				if (block->logical_bytenr != bytenr &&
				    !(!block->is_metadata &&
				      block->logical_bytenr == 0))
					pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
						bytenr, dev_state->name,
						dev_bytenr,
						block->mirror_num,
						btrfsic_get_block_type(state,
								       block),
						block->logical_bytenr);
				else
					pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
						bytenr, dev_state->name,
						dev_bytenr, block->mirror_num,
						btrfsic_get_block_type(state,
								       block));
			}
			block->logical_bytenr = bytenr;
		} else {
			if (num_pages * PAGE_SIZE <
			    state->datablock_size) {
				pr_info("btrfsic: cannot work with too short bios!\n");
				return;
			}
			processed_len = state->datablock_size;
			bytenr = block->logical_bytenr;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
					bytenr, dev_state->name, dev_bytenr,
					block->mirror_num,
					btrfsic_get_block_type(state, block));
		}

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			pr_info("ref_to_list: %cE, ref_from_list: %cE\n",
				list_empty(&block->ref_to_list) ? ' ' : '!',
				list_empty(&block->ref_from_list) ? ' ' : '!');
		/* rule 1: never overwrite a block the superblock references */
		if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
			pr_info("btrfs: attempt to overwrite %c-block @%llu (%s/%llu/%d), old(gen=%llu, objectid=%llu, type=%d, offset=%llu), new(gen=%llu), which is referenced by most recent superblock (superblockgen=%llu)!\n",
				btrfsic_get_block_type(state, block), bytenr,
				dev_state->name, dev_bytenr, block->mirror_num,
				block->generation,
				btrfs_disk_key_objectid(&block->disk_key),
				block->disk_key.type,
				btrfs_disk_key_offset(&block->disk_key),
				btrfs_stack_header_generation(
					(struct btrfs_header *) mapped_datav[0]),
				state->max_superblock_generation);
			btrfsic_dump_tree(state);
		}

		if (!block->is_iodone && !block->never_written) {
			pr_info("btrfs: attempt to overwrite %c-block @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu, which is not yet iodone!\n",
				btrfsic_get_block_type(state, block), bytenr,
				dev_state->name, dev_bytenr, block->mirror_num,
				block->generation,
				btrfs_stack_header_generation(
					(struct btrfs_header *)
					mapped_datav[0]));
			/* it would not be safe to go on */
			btrfsic_dump_tree(state);
			goto continue_loop;
		}

		/*
		 * Clear all references of this block. Do not free
		 * the block itself even if is not referenced anymore
		 * because it still carries valuable information
		 * like whether it was ever written and IO completed.
		 */
		list_for_each_entry_safe(l, tmp, &block->ref_to_list,
					 node_ref_to) {
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_rem_link(state, l);
			l->ref_cnt--;
			if (0 == l->ref_cnt) {
				list_del(&l->node_ref_to);
				list_del(&l->node_ref_from);
				btrfsic_block_link_hashtable_remove(l);
				btrfsic_block_link_free(l);
			}
		}

		block_ctx.dev = dev_state;
		block_ctx.dev_bytenr = dev_bytenr;
		block_ctx.start = bytenr;
		block_ctx.len = processed_len;
		block_ctx.pagev = NULL;
		block_ctx.mem_to_free = NULL;
		block_ctx.datav = mapped_datav;

		if (is_metadata || state->include_extent_data) {
			block->never_written = 0;
			block->iodone_w_error = 0;
			if (NULL != bio) {
				/* hook our end_io into the bio, chaining any
				 * blocks that share the same bio */
				block->is_iodone = 0;
				BUG_ON(NULL == bio_is_patched);
				if (!*bio_is_patched) {
					block->orig_bio_private =
					    bio->bi_private;
					block->orig_bio_end_io =
					    bio->bi_end_io;
					block->next_in_same_bio = NULL;
					bio->bi_private = block;
					bio->bi_end_io = btrfsic_bio_end_io;
					*bio_is_patched = 1;
				} else {
					struct btrfsic_block *chained_block =
					    (struct btrfsic_block *)
					    bio->bi_private;

					BUG_ON(NULL == chained_block);
					block->orig_bio_private =
					    chained_block->orig_bio_private;
					block->orig_bio_end_io =
					    chained_block->orig_bio_end_io;
					block->next_in_same_bio = chained_block;
					bio->bi_private = block;
				}
			} else {
				block->is_iodone = 1;
				block->orig_bio_private = NULL;
				block->orig_bio_end_io = NULL;
				block->next_in_same_bio = NULL;
			}
		}

		block->flush_gen = dev_state->last_flush_gen + 1;
		block->submit_bio_bh_rw = submit_bio_bh_rw;
		if (is_metadata) {
			block->logical_bytenr = bytenr;
			block->is_metadata = 1;
			if (block->is_superblock) {
				BUG_ON(PAGE_SIZE !=
				       BTRFS_SUPER_INFO_SIZE);
				ret = btrfsic_process_written_superblock(
						state,
						block,
						(struct btrfs_super_block *)
						mapped_datav[0]);
				if (state->print_mask &
				    BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
					pr_info("[after new superblock is written]:\n");
					btrfsic_dump_tree_sub(state, block, 0);
				}
			} else {
				block->mirror_num = 0;	/* unknown */
				ret = btrfsic_process_metablock(
						state,
						block,
						&block_ctx,
						0, 0);
			}
			if (ret)
				pr_info("btrfsic: btrfsic_process_metablock(root @%llu) failed!\n",
					dev_bytenr);
		} else {
			block->is_metadata = 0;
			block->mirror_num = 0;	/* unknown */
			block->generation = BTRFSIC_GENERATION_UNKNOWN;
			if (!state->include_extent_data
			    && list_empty(&block->ref_from_list)) {
				/*
				 * disk block is overwritten with extent
				 * data (not meta data) and we are configured
				 * to not include extent data: take the
				 * chance and free the block's memory
				 */
				btrfsic_block_hashtable_remove(block);
				list_del(&block->all_blocks_node);
				btrfsic_block_free(block);
			}
		}
		btrfsic_release_block_ctx(&block_ctx);
	} else {
		/* block has not been found in hash table */
		u64 bytenr;

		if (!is_metadata) {
			processed_len = state->datablock_size;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				pr_info("Written block (%s/%llu/?) !found in hash table, D.\n",
					dev_state->name, dev_bytenr);
			if (!state->include_extent_data) {
				/* ignore that written D block */
				goto continue_loop;
			}

			/* this is getting ugly for the
			 * include_extent_data case... */
			bytenr = 0;	/* unknown */
		} else {
			processed_len = state->metablock_size;
			bytenr = btrfs_stack_header_bytenr(
					(struct btrfs_header *)
					mapped_datav[0]);
			btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
						       dev_bytenr);
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				pr_info("Written block @%llu (%s/%llu/?) !found in hash table, M.\n",
					bytenr, dev_state->name, dev_bytenr);
		}

		block_ctx.dev = dev_state;
		block_ctx.dev_bytenr = dev_bytenr;
		block_ctx.start = bytenr;
		block_ctx.len = processed_len;
		block_ctx.pagev = NULL;
		block_ctx.mem_to_free = NULL;
		block_ctx.datav = mapped_datav;

		block = btrfsic_block_alloc();
		if (NULL == block) {
			pr_info("btrfsic: error, kmalloc failed!\n");
			btrfsic_release_block_ctx(&block_ctx);
			goto continue_loop;
		}
		block->dev_state = dev_state;
		block->dev_bytenr = dev_bytenr;
		block->logical_bytenr = bytenr;
		block->is_metadata = is_metadata;
		block->never_written = 0;
		block->iodone_w_error = 0;
		block->mirror_num = 0;	/* unknown */
		block->flush_gen = dev_state->last_flush_gen + 1;
		block->submit_bio_bh_rw = submit_bio_bh_rw;
		if (NULL != bio) {
			/* same end_io patching/chaining as above */
			block->is_iodone = 0;
			BUG_ON(NULL == bio_is_patched);
			if (!*bio_is_patched) {
				block->orig_bio_private = bio->bi_private;
				block->orig_bio_end_io = bio->bi_end_io;
				block->next_in_same_bio = NULL;
				bio->bi_private = block;
				bio->bi_end_io = btrfsic_bio_end_io;
				*bio_is_patched = 1;
			} else {
				struct btrfsic_block *chained_block =
				    (struct btrfsic_block *)
				    bio->bi_private;

				BUG_ON(NULL == chained_block);
				block->orig_bio_private =
				    chained_block->orig_bio_private;
				block->orig_bio_end_io =
				    chained_block->orig_bio_end_io;
				block->next_in_same_bio = chained_block;
				bio->bi_private = block;
			}
		} else {
			block->is_iodone = 1;
			block->orig_bio_private = NULL;
			block->orig_bio_end_io = NULL;
			block->next_in_same_bio = NULL;
		}
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			pr_info("New written %c-block @%llu (%s/%llu/%d)\n",
				is_metadata ? 'M' : 'D',
				block->logical_bytenr, block->dev_state->name,
				block->dev_bytenr, block->mirror_num);
		list_add(&block->all_blocks_node, &state->all_blocks_list);
		btrfsic_block_hashtable_add(block, &state->block_hashtable);

		if (is_metadata) {
			ret = btrfsic_process_metablock(state, block,
							&block_ctx, 0, 0);
			if (ret)
				pr_info("btrfsic: process_metablock(root @%llu) failed!\n",
					dev_bytenr);
		}
		btrfsic_release_block_ctx(&block_ctx);
	}

continue_loop:
	/* advance past the block just handled and process the rest */
	BUG_ON(!processed_len);
	dev_bytenr += processed_len;
	mapped_datav += processed_len >> PAGE_SHIFT;
	num_pages -= processed_len >> PAGE_SHIFT;
	goto again;
}

/*
 * Completion handler installed by btrfsic_process_written_block():
 * records the IO result for every block chained onto this bio, bumps the
 * per-device flush generation on REQ_PREFLUSH completion, then restores
 * and invokes the original end_io.
 */
static void btrfsic_bio_end_io(struct bio *bp)
{
	struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private;
	int iodone_w_error;

	/* mutex is not held! This is not safe if IO is not yet completed
	 * on umount */
	iodone_w_error = 0;
	if (bp->bi_status)
		iodone_w_error = 1;

	BUG_ON(NULL == block);
	bp->bi_private = block->orig_bio_private;
	bp->bi_end_io = block->orig_bio_end_io;

	do {
		struct btrfsic_block *next_block;
		struct btrfsic_dev_state *const dev_state = block->dev_state;

		if ((dev_state->state->print_mask &
		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
			pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
				bp->bi_status,
				btrfsic_get_block_type(dev_state->state, block),
				block->logical_bytenr, dev_state->name,
				block->dev_bytenr, block->mirror_num);
		next_block = block->next_in_same_bio;
		block->iodone_w_error = iodone_w_error;
		if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
			dev_state->last_flush_gen++;
			if ((dev_state->state->print_mask &
			     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
				pr_info("bio_end_io() new %s flush_gen=%llu\n",
					dev_state->name,
					dev_state->last_flush_gen);
		}
		if (block->submit_bio_bh_rw
& REQ_FUA) 2121 block->flush_gen = 0; /* FUA completed means block is 2122 * on disk */ 2123 block->is_iodone = 1; /* for FLUSH, this releases the block */ 2124 block = next_block; 2125 } while (NULL != block); 2126 2127 bp->bi_end_io(bp); 2128 } 2129 2130 static int btrfsic_process_written_superblock( 2131 struct btrfsic_state *state, 2132 struct btrfsic_block *const superblock, 2133 struct btrfs_super_block *const super_hdr) 2134 { 2135 struct btrfs_fs_info *fs_info = state->fs_info; 2136 int pass; 2137 2138 superblock->generation = btrfs_super_generation(super_hdr); 2139 if (!(superblock->generation > state->max_superblock_generation || 2140 0 == state->max_superblock_generation)) { 2141 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 2142 pr_info("btrfsic: superblock @%llu (%s/%llu/%d) with old gen %llu <= %llu\n", 2143 superblock->logical_bytenr, 2144 superblock->dev_state->name, 2145 superblock->dev_bytenr, superblock->mirror_num, 2146 btrfs_super_generation(super_hdr), 2147 state->max_superblock_generation); 2148 } else { 2149 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 2150 pr_info("btrfsic: got new superblock @%llu (%s/%llu/%d) with new gen %llu > %llu\n", 2151 superblock->logical_bytenr, 2152 superblock->dev_state->name, 2153 superblock->dev_bytenr, superblock->mirror_num, 2154 btrfs_super_generation(super_hdr), 2155 state->max_superblock_generation); 2156 2157 state->max_superblock_generation = 2158 btrfs_super_generation(super_hdr); 2159 state->latest_superblock = superblock; 2160 } 2161 2162 for (pass = 0; pass < 3; pass++) { 2163 int ret; 2164 u64 next_bytenr; 2165 struct btrfsic_block *next_block; 2166 struct btrfsic_block_data_ctx tmp_next_block_ctx; 2167 struct btrfsic_block_link *l; 2168 int num_copies; 2169 int mirror_num; 2170 const char *additional_string = NULL; 2171 struct btrfs_disk_key tmp_disk_key = {0}; 2172 2173 btrfs_set_disk_key_objectid(&tmp_disk_key, 2174 BTRFS_ROOT_ITEM_KEY); 2175 
btrfs_set_disk_key_objectid(&tmp_disk_key, 0); 2176 2177 switch (pass) { 2178 case 0: 2179 btrfs_set_disk_key_objectid(&tmp_disk_key, 2180 BTRFS_ROOT_TREE_OBJECTID); 2181 additional_string = "root "; 2182 next_bytenr = btrfs_super_root(super_hdr); 2183 if (state->print_mask & 2184 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 2185 pr_info("root@%llu\n", next_bytenr); 2186 break; 2187 case 1: 2188 btrfs_set_disk_key_objectid(&tmp_disk_key, 2189 BTRFS_CHUNK_TREE_OBJECTID); 2190 additional_string = "chunk "; 2191 next_bytenr = btrfs_super_chunk_root(super_hdr); 2192 if (state->print_mask & 2193 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 2194 pr_info("chunk@%llu\n", next_bytenr); 2195 break; 2196 case 2: 2197 btrfs_set_disk_key_objectid(&tmp_disk_key, 2198 BTRFS_TREE_LOG_OBJECTID); 2199 additional_string = "log "; 2200 next_bytenr = btrfs_super_log_root(super_hdr); 2201 if (0 == next_bytenr) 2202 continue; 2203 if (state->print_mask & 2204 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 2205 pr_info("log@%llu\n", next_bytenr); 2206 break; 2207 } 2208 2209 num_copies = btrfs_num_copies(fs_info, next_bytenr, 2210 BTRFS_SUPER_INFO_SIZE); 2211 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 2212 pr_info("num_copies(log_bytenr=%llu) = %d\n", 2213 next_bytenr, num_copies); 2214 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2215 int was_created; 2216 2217 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2218 pr_info("btrfsic_process_written_superblock(mirror_num=%d)\n", mirror_num); 2219 ret = btrfsic_map_block(state, next_bytenr, 2220 BTRFS_SUPER_INFO_SIZE, 2221 &tmp_next_block_ctx, 2222 mirror_num); 2223 if (ret) { 2224 pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n", 2225 next_bytenr, mirror_num); 2226 return -1; 2227 } 2228 2229 next_block = btrfsic_block_lookup_or_add( 2230 state, 2231 &tmp_next_block_ctx, 2232 additional_string, 2233 1, 0, 1, 2234 mirror_num, 2235 &was_created); 2236 if (NULL == next_block) { 2237 
pr_info("btrfsic: error, kmalloc failed!\n"); 2238 btrfsic_release_block_ctx(&tmp_next_block_ctx); 2239 return -1; 2240 } 2241 2242 next_block->disk_key = tmp_disk_key; 2243 if (was_created) 2244 next_block->generation = 2245 BTRFSIC_GENERATION_UNKNOWN; 2246 l = btrfsic_block_link_lookup_or_add( 2247 state, 2248 &tmp_next_block_ctx, 2249 next_block, 2250 superblock, 2251 BTRFSIC_GENERATION_UNKNOWN); 2252 btrfsic_release_block_ctx(&tmp_next_block_ctx); 2253 if (NULL == l) 2254 return -1; 2255 } 2256 } 2257 2258 if (WARN_ON(-1 == btrfsic_check_all_ref_blocks(state, superblock, 0))) 2259 btrfsic_dump_tree(state); 2260 2261 return 0; 2262 } 2263 2264 static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state, 2265 struct btrfsic_block *const block, 2266 int recursion_level) 2267 { 2268 const struct btrfsic_block_link *l; 2269 int ret = 0; 2270 2271 if (recursion_level >= 3 + BTRFS_MAX_LEVEL) { 2272 /* 2273 * Note that this situation can happen and does not 2274 * indicate an error in regular cases. It happens 2275 * when disk blocks are freed and later reused. 2276 * The check-integrity module is not aware of any 2277 * block free operations, it just recognizes block 2278 * write operations. Therefore it keeps the linkage 2279 * information for a block until a block is 2280 * rewritten. This can temporarily cause incorrect 2281 * and even circular linkage information. This 2282 * causes no harm unless such blocks are referenced 2283 * by the most recent super block. 2284 */ 2285 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2286 pr_info("btrfsic: abort cyclic linkage (case 1).\n"); 2287 2288 return ret; 2289 } 2290 2291 /* 2292 * This algorithm is recursive because the amount of used stack 2293 * space is very small and the max recursion depth is limited. 
2294 */ 2295 list_for_each_entry(l, &block->ref_to_list, node_ref_to) { 2296 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2297 pr_info("rl=%d, %c @%llu (%s/%llu/%d) %u* refers to %c @%llu (%s/%llu/%d)\n", 2298 recursion_level, 2299 btrfsic_get_block_type(state, block), 2300 block->logical_bytenr, block->dev_state->name, 2301 block->dev_bytenr, block->mirror_num, 2302 l->ref_cnt, 2303 btrfsic_get_block_type(state, l->block_ref_to), 2304 l->block_ref_to->logical_bytenr, 2305 l->block_ref_to->dev_state->name, 2306 l->block_ref_to->dev_bytenr, 2307 l->block_ref_to->mirror_num); 2308 if (l->block_ref_to->never_written) { 2309 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is never written!\n", 2310 btrfsic_get_block_type(state, l->block_ref_to), 2311 l->block_ref_to->logical_bytenr, 2312 l->block_ref_to->dev_state->name, 2313 l->block_ref_to->dev_bytenr, 2314 l->block_ref_to->mirror_num); 2315 ret = -1; 2316 } else if (!l->block_ref_to->is_iodone) { 2317 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is not yet iodone!\n", 2318 btrfsic_get_block_type(state, l->block_ref_to), 2319 l->block_ref_to->logical_bytenr, 2320 l->block_ref_to->dev_state->name, 2321 l->block_ref_to->dev_bytenr, 2322 l->block_ref_to->mirror_num); 2323 ret = -1; 2324 } else if (l->block_ref_to->iodone_w_error) { 2325 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which has write error!\n", 2326 btrfsic_get_block_type(state, l->block_ref_to), 2327 l->block_ref_to->logical_bytenr, 2328 l->block_ref_to->dev_state->name, 2329 l->block_ref_to->dev_bytenr, 2330 l->block_ref_to->mirror_num); 2331 ret = -1; 2332 } else if (l->parent_generation != 2333 l->block_ref_to->generation && 2334 BTRFSIC_GENERATION_UNKNOWN != 2335 l->parent_generation && 2336 BTRFSIC_GENERATION_UNKNOWN != 2337 l->block_ref_to->generation) { 2338 pr_info("btrfs: attempt to write superblock 
which references block %c @%llu (%s/%llu/%d) with generation %llu != parent generation %llu!\n", 2339 btrfsic_get_block_type(state, l->block_ref_to), 2340 l->block_ref_to->logical_bytenr, 2341 l->block_ref_to->dev_state->name, 2342 l->block_ref_to->dev_bytenr, 2343 l->block_ref_to->mirror_num, 2344 l->block_ref_to->generation, 2345 l->parent_generation); 2346 ret = -1; 2347 } else if (l->block_ref_to->flush_gen > 2348 l->block_ref_to->dev_state->last_flush_gen) { 2349 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is not flushed out of disk's write cache (block flush_gen=%llu, dev->flush_gen=%llu)!\n", 2350 btrfsic_get_block_type(state, l->block_ref_to), 2351 l->block_ref_to->logical_bytenr, 2352 l->block_ref_to->dev_state->name, 2353 l->block_ref_to->dev_bytenr, 2354 l->block_ref_to->mirror_num, block->flush_gen, 2355 l->block_ref_to->dev_state->last_flush_gen); 2356 ret = -1; 2357 } else if (-1 == btrfsic_check_all_ref_blocks(state, 2358 l->block_ref_to, 2359 recursion_level + 2360 1)) { 2361 ret = -1; 2362 } 2363 } 2364 2365 return ret; 2366 } 2367 2368 static int btrfsic_is_block_ref_by_superblock( 2369 const struct btrfsic_state *state, 2370 const struct btrfsic_block *block, 2371 int recursion_level) 2372 { 2373 const struct btrfsic_block_link *l; 2374 2375 if (recursion_level >= 3 + BTRFS_MAX_LEVEL) { 2376 /* refer to comment at "abort cyclic linkage (case 1)" */ 2377 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2378 pr_info("btrfsic: abort cyclic linkage (case 2).\n"); 2379 2380 return 0; 2381 } 2382 2383 /* 2384 * This algorithm is recursive because the amount of used stack space 2385 * is very small and the max recursion depth is limited. 
2386 */ 2387 list_for_each_entry(l, &block->ref_from_list, node_ref_from) { 2388 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2389 pr_info("rl=%d, %c @%llu (%s/%llu/%d) is ref %u* from %c @%llu (%s/%llu/%d)\n", 2390 recursion_level, 2391 btrfsic_get_block_type(state, block), 2392 block->logical_bytenr, block->dev_state->name, 2393 block->dev_bytenr, block->mirror_num, 2394 l->ref_cnt, 2395 btrfsic_get_block_type(state, l->block_ref_from), 2396 l->block_ref_from->logical_bytenr, 2397 l->block_ref_from->dev_state->name, 2398 l->block_ref_from->dev_bytenr, 2399 l->block_ref_from->mirror_num); 2400 if (l->block_ref_from->is_superblock && 2401 state->latest_superblock->dev_bytenr == 2402 l->block_ref_from->dev_bytenr && 2403 state->latest_superblock->dev_state->bdev == 2404 l->block_ref_from->dev_state->bdev) 2405 return 1; 2406 else if (btrfsic_is_block_ref_by_superblock(state, 2407 l->block_ref_from, 2408 recursion_level + 2409 1)) 2410 return 1; 2411 } 2412 2413 return 0; 2414 } 2415 2416 static void btrfsic_print_add_link(const struct btrfsic_state *state, 2417 const struct btrfsic_block_link *l) 2418 { 2419 pr_info("Add %u* link from %c @%llu (%s/%llu/%d) to %c @%llu (%s/%llu/%d).\n", 2420 l->ref_cnt, 2421 btrfsic_get_block_type(state, l->block_ref_from), 2422 l->block_ref_from->logical_bytenr, 2423 l->block_ref_from->dev_state->name, 2424 l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num, 2425 btrfsic_get_block_type(state, l->block_ref_to), 2426 l->block_ref_to->logical_bytenr, 2427 l->block_ref_to->dev_state->name, l->block_ref_to->dev_bytenr, 2428 l->block_ref_to->mirror_num); 2429 } 2430 2431 static void btrfsic_print_rem_link(const struct btrfsic_state *state, 2432 const struct btrfsic_block_link *l) 2433 { 2434 pr_info("Rem %u* link from %c @%llu (%s/%llu/%d) to %c @%llu (%s/%llu/%d).\n", 2435 l->ref_cnt, 2436 btrfsic_get_block_type(state, l->block_ref_from), 2437 l->block_ref_from->logical_bytenr, 2438 l->block_ref_from->dev_state->name, 
2439 l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num, 2440 btrfsic_get_block_type(state, l->block_ref_to), 2441 l->block_ref_to->logical_bytenr, 2442 l->block_ref_to->dev_state->name, l->block_ref_to->dev_bytenr, 2443 l->block_ref_to->mirror_num); 2444 } 2445 2446 static char btrfsic_get_block_type(const struct btrfsic_state *state, 2447 const struct btrfsic_block *block) 2448 { 2449 if (block->is_superblock && 2450 state->latest_superblock->dev_bytenr == block->dev_bytenr && 2451 state->latest_superblock->dev_state->bdev == block->dev_state->bdev) 2452 return 'S'; 2453 else if (block->is_superblock) 2454 return 's'; 2455 else if (block->is_metadata) 2456 return 'M'; 2457 else 2458 return 'D'; 2459 } 2460 2461 static void btrfsic_dump_tree(const struct btrfsic_state *state) 2462 { 2463 btrfsic_dump_tree_sub(state, state->latest_superblock, 0); 2464 } 2465 2466 static void btrfsic_dump_tree_sub(const struct btrfsic_state *state, 2467 const struct btrfsic_block *block, 2468 int indent_level) 2469 { 2470 const struct btrfsic_block_link *l; 2471 int indent_add; 2472 static char buf[80]; 2473 int cursor_position; 2474 2475 /* 2476 * Should better fill an on-stack buffer with a complete line and 2477 * dump it at once when it is time to print a newline character. 2478 */ 2479 2480 /* 2481 * This algorithm is recursive because the amount of used stack space 2482 * is very small and the max recursion depth is limited. 
2483 */ 2484 indent_add = sprintf(buf, "%c-%llu(%s/%llu/%u)", 2485 btrfsic_get_block_type(state, block), 2486 block->logical_bytenr, block->dev_state->name, 2487 block->dev_bytenr, block->mirror_num); 2488 if (indent_level + indent_add > BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) { 2489 printk("[...]\n"); 2490 return; 2491 } 2492 printk(buf); 2493 indent_level += indent_add; 2494 if (list_empty(&block->ref_to_list)) { 2495 printk("\n"); 2496 return; 2497 } 2498 if (block->mirror_num > 1 && 2499 !(state->print_mask & BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS)) { 2500 printk(" [...]\n"); 2501 return; 2502 } 2503 2504 cursor_position = indent_level; 2505 list_for_each_entry(l, &block->ref_to_list, node_ref_to) { 2506 while (cursor_position < indent_level) { 2507 printk(" "); 2508 cursor_position++; 2509 } 2510 if (l->ref_cnt > 1) 2511 indent_add = sprintf(buf, " %d*--> ", l->ref_cnt); 2512 else 2513 indent_add = sprintf(buf, " --> "); 2514 if (indent_level + indent_add > 2515 BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) { 2516 printk("[...]\n"); 2517 cursor_position = 0; 2518 continue; 2519 } 2520 2521 printk(buf); 2522 2523 btrfsic_dump_tree_sub(state, l->block_ref_to, 2524 indent_level + indent_add); 2525 cursor_position = 0; 2526 } 2527 } 2528 2529 static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add( 2530 struct btrfsic_state *state, 2531 struct btrfsic_block_data_ctx *next_block_ctx, 2532 struct btrfsic_block *next_block, 2533 struct btrfsic_block *from_block, 2534 u64 parent_generation) 2535 { 2536 struct btrfsic_block_link *l; 2537 2538 l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev, 2539 next_block_ctx->dev_bytenr, 2540 from_block->dev_state->bdev, 2541 from_block->dev_bytenr, 2542 &state->block_link_hashtable); 2543 if (NULL == l) { 2544 l = btrfsic_block_link_alloc(); 2545 if (NULL == l) { 2546 pr_info("btrfsic: error, kmalloc failed!\n"); 2547 return NULL; 2548 } 2549 2550 l->block_ref_to = next_block; 2551 l->block_ref_from = from_block; 
2552 l->ref_cnt = 1; 2553 l->parent_generation = parent_generation; 2554 2555 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2556 btrfsic_print_add_link(state, l); 2557 2558 list_add(&l->node_ref_to, &from_block->ref_to_list); 2559 list_add(&l->node_ref_from, &next_block->ref_from_list); 2560 2561 btrfsic_block_link_hashtable_add(l, 2562 &state->block_link_hashtable); 2563 } else { 2564 l->ref_cnt++; 2565 l->parent_generation = parent_generation; 2566 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2567 btrfsic_print_add_link(state, l); 2568 } 2569 2570 return l; 2571 } 2572 2573 static struct btrfsic_block *btrfsic_block_lookup_or_add( 2574 struct btrfsic_state *state, 2575 struct btrfsic_block_data_ctx *block_ctx, 2576 const char *additional_string, 2577 int is_metadata, 2578 int is_iodone, 2579 int never_written, 2580 int mirror_num, 2581 int *was_created) 2582 { 2583 struct btrfsic_block *block; 2584 2585 block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev, 2586 block_ctx->dev_bytenr, 2587 &state->block_hashtable); 2588 if (NULL == block) { 2589 struct btrfsic_dev_state *dev_state; 2590 2591 block = btrfsic_block_alloc(); 2592 if (NULL == block) { 2593 pr_info("btrfsic: error, kmalloc failed!\n"); 2594 return NULL; 2595 } 2596 dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev->bd_dev); 2597 if (NULL == dev_state) { 2598 pr_info("btrfsic: error, lookup dev_state failed!\n"); 2599 btrfsic_block_free(block); 2600 return NULL; 2601 } 2602 block->dev_state = dev_state; 2603 block->dev_bytenr = block_ctx->dev_bytenr; 2604 block->logical_bytenr = block_ctx->start; 2605 block->is_metadata = is_metadata; 2606 block->is_iodone = is_iodone; 2607 block->never_written = never_written; 2608 block->mirror_num = mirror_num; 2609 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2610 pr_info("New %s%c-block @%llu (%s/%llu/%d)\n", 2611 additional_string, 2612 btrfsic_get_block_type(state, block), 2613 block->logical_bytenr, dev_state->name, 2614 
block->dev_bytenr, mirror_num); 2615 list_add(&block->all_blocks_node, &state->all_blocks_list); 2616 btrfsic_block_hashtable_add(block, &state->block_hashtable); 2617 if (NULL != was_created) 2618 *was_created = 1; 2619 } else { 2620 if (NULL != was_created) 2621 *was_created = 0; 2622 } 2623 2624 return block; 2625 } 2626 2627 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state, 2628 u64 bytenr, 2629 struct btrfsic_dev_state *dev_state, 2630 u64 dev_bytenr) 2631 { 2632 struct btrfs_fs_info *fs_info = state->fs_info; 2633 struct btrfsic_block_data_ctx block_ctx; 2634 int num_copies; 2635 int mirror_num; 2636 int match = 0; 2637 int ret; 2638 2639 num_copies = btrfs_num_copies(fs_info, bytenr, state->metablock_size); 2640 2641 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2642 ret = btrfsic_map_block(state, bytenr, state->metablock_size, 2643 &block_ctx, mirror_num); 2644 if (ret) { 2645 pr_info("btrfsic: btrfsic_map_block(logical @%llu, mirror %d) failed!\n", 2646 bytenr, mirror_num); 2647 continue; 2648 } 2649 2650 if (dev_state->bdev == block_ctx.dev->bdev && 2651 dev_bytenr == block_ctx.dev_bytenr) { 2652 match++; 2653 btrfsic_release_block_ctx(&block_ctx); 2654 break; 2655 } 2656 btrfsic_release_block_ctx(&block_ctx); 2657 } 2658 2659 if (WARN_ON(!match)) { 2660 pr_info("btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio, buffer->log_bytenr=%llu, submit_bio(bdev=%s, phys_bytenr=%llu)!\n", 2661 bytenr, dev_state->name, dev_bytenr); 2662 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2663 ret = btrfsic_map_block(state, bytenr, 2664 state->metablock_size, 2665 &block_ctx, mirror_num); 2666 if (ret) 2667 continue; 2668 2669 pr_info("Read logical bytenr @%llu maps to (%s/%llu/%d)\n", 2670 bytenr, block_ctx.dev->name, 2671 block_ctx.dev_bytenr, mirror_num); 2672 } 2673 } 2674 } 2675 2676 static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t 
dev) 2677 { 2678 return btrfsic_dev_state_hashtable_lookup(dev, 2679 &btrfsic_dev_state_hashtable); 2680 } 2681 2682 static void __btrfsic_submit_bio(struct bio *bio) 2683 { 2684 struct btrfsic_dev_state *dev_state; 2685 2686 if (!btrfsic_is_initialized) 2687 return; 2688 2689 mutex_lock(&btrfsic_mutex); 2690 /* since btrfsic_submit_bio() is also called before 2691 * btrfsic_mount(), this might return NULL */ 2692 dev_state = btrfsic_dev_state_lookup(bio_dev(bio) + bio->bi_partno); 2693 if (NULL != dev_state && 2694 (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) { 2695 unsigned int i = 0; 2696 u64 dev_bytenr; 2697 u64 cur_bytenr; 2698 struct bio_vec bvec; 2699 struct bvec_iter iter; 2700 int bio_is_patched; 2701 char **mapped_datav; 2702 unsigned int segs = bio_segments(bio); 2703 2704 dev_bytenr = 512 * bio->bi_iter.bi_sector; 2705 bio_is_patched = 0; 2706 if (dev_state->state->print_mask & 2707 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2708 pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_disk=%p)\n", 2709 bio_op(bio), bio->bi_opf, segs, 2710 (unsigned long long)bio->bi_iter.bi_sector, 2711 dev_bytenr, bio->bi_disk); 2712 2713 mapped_datav = kmalloc_array(segs, 2714 sizeof(*mapped_datav), GFP_NOFS); 2715 if (!mapped_datav) 2716 goto leave; 2717 cur_bytenr = dev_bytenr; 2718 2719 bio_for_each_segment(bvec, bio, iter) { 2720 BUG_ON(bvec.bv_len != PAGE_SIZE); 2721 mapped_datav[i] = kmap(bvec.bv_page); 2722 i++; 2723 2724 if (dev_state->state->print_mask & 2725 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE) 2726 pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n", 2727 i, cur_bytenr, bvec.bv_len, bvec.bv_offset); 2728 cur_bytenr += bvec.bv_len; 2729 } 2730 btrfsic_process_written_block(dev_state, dev_bytenr, 2731 mapped_datav, segs, 2732 bio, &bio_is_patched, 2733 bio->bi_opf); 2734 bio_for_each_segment(bvec, bio, iter) 2735 kunmap(bvec.bv_page); 2736 kfree(mapped_datav); 2737 } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) { 2738 if 
(dev_state->state->print_mask & 2739 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2740 pr_info("submit_bio(rw=%d,0x%x FLUSH, disk=%p)\n", 2741 bio_op(bio), bio->bi_opf, bio->bi_disk); 2742 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { 2743 if ((dev_state->state->print_mask & 2744 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | 2745 BTRFSIC_PRINT_MASK_VERBOSE))) 2746 pr_info("btrfsic_submit_bio(%s) with FLUSH but dummy block already in use (ignored)!\n", 2747 dev_state->name); 2748 } else { 2749 struct btrfsic_block *const block = 2750 &dev_state->dummy_block_for_bio_bh_flush; 2751 2752 block->is_iodone = 0; 2753 block->never_written = 0; 2754 block->iodone_w_error = 0; 2755 block->flush_gen = dev_state->last_flush_gen + 1; 2756 block->submit_bio_bh_rw = bio->bi_opf; 2757 block->orig_bio_private = bio->bi_private; 2758 block->orig_bio_end_io = bio->bi_end_io; 2759 block->next_in_same_bio = NULL; 2760 bio->bi_private = block; 2761 bio->bi_end_io = btrfsic_bio_end_io; 2762 } 2763 } 2764 leave: 2765 mutex_unlock(&btrfsic_mutex); 2766 } 2767 2768 void btrfsic_submit_bio(struct bio *bio) 2769 { 2770 __btrfsic_submit_bio(bio); 2771 submit_bio(bio); 2772 } 2773 2774 int btrfsic_submit_bio_wait(struct bio *bio) 2775 { 2776 __btrfsic_submit_bio(bio); 2777 return submit_bio_wait(bio); 2778 } 2779 2780 int btrfsic_mount(struct btrfs_fs_info *fs_info, 2781 struct btrfs_fs_devices *fs_devices, 2782 int including_extent_data, u32 print_mask) 2783 { 2784 int ret; 2785 struct btrfsic_state *state; 2786 struct list_head *dev_head = &fs_devices->devices; 2787 struct btrfs_device *device; 2788 2789 if (!PAGE_ALIGNED(fs_info->nodesize)) { 2790 pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n", 2791 fs_info->nodesize, PAGE_SIZE); 2792 return -1; 2793 } 2794 if (!PAGE_ALIGNED(fs_info->sectorsize)) { 2795 pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n", 2796 fs_info->sectorsize, PAGE_SIZE); 2797 return -1; 2798 } 2799 state 
= kvzalloc(sizeof(*state), GFP_KERNEL); 2800 if (!state) { 2801 pr_info("btrfs check-integrity: allocation failed!\n"); 2802 return -ENOMEM; 2803 } 2804 2805 if (!btrfsic_is_initialized) { 2806 mutex_init(&btrfsic_mutex); 2807 btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable); 2808 btrfsic_is_initialized = 1; 2809 } 2810 mutex_lock(&btrfsic_mutex); 2811 state->fs_info = fs_info; 2812 state->print_mask = print_mask; 2813 state->include_extent_data = including_extent_data; 2814 state->csum_size = 0; 2815 state->metablock_size = fs_info->nodesize; 2816 state->datablock_size = fs_info->sectorsize; 2817 INIT_LIST_HEAD(&state->all_blocks_list); 2818 btrfsic_block_hashtable_init(&state->block_hashtable); 2819 btrfsic_block_link_hashtable_init(&state->block_link_hashtable); 2820 state->max_superblock_generation = 0; 2821 state->latest_superblock = NULL; 2822 2823 list_for_each_entry(device, dev_head, dev_list) { 2824 struct btrfsic_dev_state *ds; 2825 const char *p; 2826 2827 if (!device->bdev || !device->name) 2828 continue; 2829 2830 ds = btrfsic_dev_state_alloc(); 2831 if (NULL == ds) { 2832 pr_info("btrfs check-integrity: kmalloc() failed!\n"); 2833 mutex_unlock(&btrfsic_mutex); 2834 return -ENOMEM; 2835 } 2836 ds->bdev = device->bdev; 2837 ds->state = state; 2838 bdevname(ds->bdev, ds->name); 2839 ds->name[BDEVNAME_SIZE - 1] = '\0'; 2840 p = kbasename(ds->name); 2841 strlcpy(ds->name, p, sizeof(ds->name)); 2842 btrfsic_dev_state_hashtable_add(ds, 2843 &btrfsic_dev_state_hashtable); 2844 } 2845 2846 ret = btrfsic_process_superblock(state, fs_devices); 2847 if (0 != ret) { 2848 mutex_unlock(&btrfsic_mutex); 2849 btrfsic_unmount(fs_devices); 2850 return ret; 2851 } 2852 2853 if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_DATABASE) 2854 btrfsic_dump_database(state); 2855 if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_TREE) 2856 btrfsic_dump_tree(state); 2857 2858 mutex_unlock(&btrfsic_mutex); 2859 return 0; 2860 } 2861 2862 void btrfsic_unmount(struct 
btrfs_fs_devices *fs_devices) 2863 { 2864 struct btrfsic_block *b_all, *tmp_all; 2865 struct btrfsic_state *state; 2866 struct list_head *dev_head = &fs_devices->devices; 2867 struct btrfs_device *device; 2868 2869 if (!btrfsic_is_initialized) 2870 return; 2871 2872 mutex_lock(&btrfsic_mutex); 2873 2874 state = NULL; 2875 list_for_each_entry(device, dev_head, dev_list) { 2876 struct btrfsic_dev_state *ds; 2877 2878 if (!device->bdev || !device->name) 2879 continue; 2880 2881 ds = btrfsic_dev_state_hashtable_lookup( 2882 device->bdev->bd_dev, 2883 &btrfsic_dev_state_hashtable); 2884 if (NULL != ds) { 2885 state = ds->state; 2886 btrfsic_dev_state_hashtable_remove(ds); 2887 btrfsic_dev_state_free(ds); 2888 } 2889 } 2890 2891 if (NULL == state) { 2892 pr_info("btrfsic: error, cannot find state information on umount!\n"); 2893 mutex_unlock(&btrfsic_mutex); 2894 return; 2895 } 2896 2897 /* 2898 * Don't care about keeping the lists' state up to date, 2899 * just free all memory that was allocated dynamically. 2900 * Free the blocks and the block_links. 2901 */ 2902 list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list, 2903 all_blocks_node) { 2904 struct btrfsic_block_link *l, *tmp; 2905 2906 list_for_each_entry_safe(l, tmp, &b_all->ref_to_list, 2907 node_ref_to) { 2908 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2909 btrfsic_print_rem_link(state, l); 2910 2911 l->ref_cnt--; 2912 if (0 == l->ref_cnt) 2913 btrfsic_block_link_free(l); 2914 } 2915 2916 if (b_all->is_iodone || b_all->never_written) 2917 btrfsic_block_free(b_all); 2918 else 2919 pr_info("btrfs: attempt to free %c-block @%llu (%s/%llu/%d) on umount which is not yet iodone!\n", 2920 btrfsic_get_block_type(state, b_all), 2921 b_all->logical_bytenr, b_all->dev_state->name, 2922 b_all->dev_bytenr, b_all->mirror_num); 2923 } 2924 2925 mutex_unlock(&btrfsic_mutex); 2926 2927 kvfree(state); 2928 } 2929