/*
 * Copyright (C) STRATO AG 2011. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

/*
 * This module can be used to catch cases when the btrfs kernel
 * code executes write requests to the disk that bring the file
 * system into an inconsistent state. In such a state, a power-loss
 * or kernel panic event would cause the data on disk to be
 * lost or at least damaged.
 *
 * Code is added that examines all block write requests during
 * runtime (including writes of the super block). Three rules
 * are verified and an error is printed on violation of the
 * rules:
 * 1. It is not allowed to write a disk block which is
 *    currently referenced by the super block (either directly
 *    or indirectly).
 * 2. When a super block is written, it is verified that all
 *    referenced (directly or indirectly) blocks fulfill the
 *    following requirements:
 *    2a. All referenced blocks have either been present when
 *        the file system was mounted (i.e., they have been
 *        referenced by the super block) or they have been
 *        written since then and the write completion callback
 *        was called and no write error was indicated and a
 *        FLUSH request to the device where these blocks are
 *        located was received and completed.
 *    2b. All referenced blocks need to have a generation
 *        number which is equal to the parent's number.
 *
 * One issue that was found using this module was that the log
 * tree on disk became temporarily corrupted because disk blocks
 * that had been in use for the log tree had been freed and
 * reused too early, while being referenced by the written super
 * block.
 *
 * The search term in the kernel log that can be used to filter
 * on the existence of detected integrity issues is
 * "btrfs: attempt".
 *
 * The integrity check is enabled via mount options. These
 * mount options are only supported if the integrity check
 * tool is compiled by defining BTRFS_FS_CHECK_INTEGRITY.
 *
 * Example #1, apply integrity checks to all metadata:
 * mount /dev/sdb1 /mnt -o check_int
 *
 * Example #2, apply integrity checks to all metadata and
 * to data extents:
 * mount /dev/sdb1 /mnt -o check_int_data
 *
 * Example #3, apply integrity checks to all metadata and dump
 * the tree that the super block references to kernel messages
 * each time after a super block was written:
 * mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263
 *
 * If the integrity check tool is included and activated in
 * the mount options, plenty of kernel memory is used, and
 * plenty of additional CPU cycles are spent. Enabling this
 * functionality is not intended for normal use. In most
 * cases, unless you are a btrfs developer who needs to verify
 * the integrity of (super)-block write requests, do not
 * enable the config option BTRFS_FS_CHECK_INTEGRITY to
 * include and compile the integrity check tool.
 *
 * Expect millions of lines of information in the kernel log with an
 * enabled check_int_print_mask. Therefore set LOG_BUF_SHIFT in the
 * kernel config to at least 26 (which is 64MB). Usually the value is
 * limited to 21 (which is 2MB) in init/Kconfig. The file needs to be
 * changed like this before LOG_BUF_SHIFT can be set to a high value:
 * config LOG_BUF_SHIFT
 *	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
 *	range 12 30
 */

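/*
 * For illustration only (hypothetical device and values), a detected
 * violation of rule 1 appears in the kernel log in a form like the
 * following, which the "btrfs: attempt" search term from above matches:
 *
 *   btrfs: attempt to overwrite M-block @30408704 (sdb1/30408704/0),
 *   old(gen=10, ...), new(gen=11), which is referenced by most recent
 *   superblock (superblockgen=11)!
 */
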
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "extent_io.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "compression.h"

#define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
#define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
#define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100
#define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051
#define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807
#define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530
#define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300
#define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6)	/* in characters,
							 * excluding " [...]" */
#define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)

/*
 * The definition of the bitmask fields for the print_mask.
 * They are specified with the mount option check_int_print_mask.
 */
#define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE			0x00000001
#define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION		0x00000002
#define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE			0x00000004
#define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE			0x00000008
#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH			0x00000010
#define BTRFSIC_PRINT_MASK_END_IO_BIO_BH			0x00000020
#define BTRFSIC_PRINT_MASK_VERBOSE				0x00000040
#define BTRFSIC_PRINT_MASK_VERY_VERBOSE				0x00000080
#define BTRFSIC_PRINT_MASK_INITIAL_TREE				0x00000100
#define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES			0x00000200
#define BTRFSIC_PRINT_MASK_INITIAL_DATABASE			0x00000400
#define BTRFSIC_PRINT_MASK_NUM_COPIES				0x00000800
#define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS		0x00001000
#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE		0x00002000

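/*
 * As a worked example, the mask value 263 from example #3 in the header
 * comment decodes to 0x107, i.e.
 * BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE (0x001) |
 * BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION (0x002) |
 * BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE (0x004) |
 * BTRFSIC_PRINT_MASK_INITIAL_TREE (0x100).
 */
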
struct btrfsic_dev_state;
struct btrfsic_state;

struct btrfsic_block {
	u32 magic_num;		/* only used for debug purposes */
	unsigned int is_metadata:1;	/* if it is meta-data, not data-data */
	unsigned int is_superblock:1;	/* if it is one of the superblocks */
	unsigned int is_iodone:1;	/* if IO is done by the lower subsystem */
	unsigned int iodone_w_error:1;	/* error was indicated to endio */
	unsigned int never_written:1;	/* block was added because it was
					 * referenced, not because it was
					 * written */
	unsigned int mirror_num;	/* large enough to hold
					 * BTRFS_SUPER_MIRROR_MAX */
	struct btrfsic_dev_state *dev_state;
	u64 dev_bytenr;		/* key, physical byte num on disk */
	u64 logical_bytenr;	/* logical byte num on disk */
	u64 generation;
	struct btrfs_disk_key disk_key;	/* extra info to print in case of
					 * issues, will not always be correct */
	struct list_head collision_resolving_node;	/* list node */
	struct list_head all_blocks_node;	/* list node */

	/* the following two lists contain block_link items */
	struct list_head ref_to_list;	/* list */
	struct list_head ref_from_list;	/* list */
	struct btrfsic_block *next_in_same_bio;
	void *orig_bio_bh_private;
	union {
		bio_end_io_t *bio;
		bh_end_io_t *bh;
	} orig_bio_bh_end_io;
	int submit_bio_bh_rw;
	u64 flush_gen;		/* only valid if !never_written */
};

/*
 * Elements of this type are allocated dynamically and required because
 * each block object can refer to and can be referred from multiple
 * blocks. The key used to look them up in the hashtable is the
 * dev_bytenr of the referenced block plus the dev_bytenr of the
 * referencing block. The fact that they are searchable via a hashtable
 * and that a ref_cnt is maintained is not required for the btrfs
 * integrity check algorithm itself; it is only used to make the output
 * more beautiful in case an error is detected (an error is defined as
 * a write operation to a block while that block is still referenced).
 */
struct btrfsic_block_link {
	u32 magic_num;		/* only used for debug purposes */
	u32 ref_cnt;
	struct list_head node_ref_to;	/* list node */
	struct list_head node_ref_from;	/* list node */
	struct list_head collision_resolving_node;	/* list node */
	struct btrfsic_block *block_ref_to;
	struct btrfsic_block *block_ref_from;
	u64 parent_generation;
};

struct btrfsic_dev_state {
	u32 magic_num;		/* only used for debug purposes */
	struct block_device *bdev;
	struct btrfsic_state *state;
	struct list_head collision_resolving_node;	/* list node */
	struct btrfsic_block dummy_block_for_bio_bh_flush;
	u64 last_flush_gen;
	char name[BDEVNAME_SIZE];
};

struct btrfsic_block_hashtable {
	struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE];
};

struct btrfsic_block_link_hashtable {
	struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE];
};

struct btrfsic_dev_state_hashtable {
	struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE];
};

struct btrfsic_block_data_ctx {
	u64 start;		/* virtual bytenr */
	u64 dev_bytenr;		/* physical bytenr on device */
	u32 len;
	struct btrfsic_dev_state *dev;
	char **datav;
	struct page **pagev;
	void *mem_to_free;
};

/*
 * This structure is used to implement recursion without occupying
 * any stack space, refer to btrfsic_process_metablock().
 */
struct btrfsic_stack_frame {
	u32 magic;
	u32 nr;
	int error;
	int i;
	int limit_nesting;
	int num_copies;
	int mirror_num;
	struct btrfsic_block *block;
	struct btrfsic_block_data_ctx *block_ctx;
	struct btrfsic_block *next_block;
	struct btrfsic_block_data_ctx next_block_ctx;
	struct btrfs_header *hdr;
	struct btrfsic_stack_frame *prev;
};

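/*
 * A sketch of how these frames are used (see btrfsic_process_metablock()
 * below): instead of calling itself for a child tree block, the function
 * allocates a new frame, chains it to the current one via ->prev, makes
 * it the current frame and jumps back to the top of its loop;
 * "returning" means propagating ->error to ->prev, freeing the frame and
 * continuing with the previous one.
 */
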
/* Some state per mounted filesystem */
struct btrfsic_state {
	u32 print_mask;
	int include_extent_data;
	int csum_size;
	struct list_head all_blocks_list;
	struct btrfsic_block_hashtable block_hashtable;
	struct btrfsic_block_link_hashtable block_link_hashtable;
	struct btrfs_fs_info *fs_info;
	u64 max_superblock_generation;
	struct btrfsic_block *latest_superblock;
	u32 metablock_size;
	u32 datablock_size;
};

static void btrfsic_block_init(struct btrfsic_block *b);
static struct btrfsic_block *btrfsic_block_alloc(void);
static void btrfsic_block_free(struct btrfsic_block *b);
static void btrfsic_block_link_init(struct btrfsic_block_link *n);
static struct btrfsic_block_link *btrfsic_block_link_alloc(void);
static void btrfsic_block_link_free(struct btrfsic_block_link *n);
static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds);
static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void);
static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds);
static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h);
static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
					struct btrfsic_block_hashtable *h);
static void btrfsic_block_hashtable_remove(struct btrfsic_block *b);
static struct btrfsic_block *btrfsic_block_hashtable_lookup(
		struct block_device *bdev,
		u64 dev_bytenr,
		struct btrfsic_block_hashtable *h);
static void btrfsic_block_link_hashtable_init(
		struct btrfsic_block_link_hashtable *h);
static void btrfsic_block_link_hashtable_add(
		struct btrfsic_block_link *l,
		struct btrfsic_block_link_hashtable *h);
static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l);
static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
		struct block_device *bdev_ref_to,
		u64 dev_bytenr_ref_to,
		struct block_device *bdev_ref_from,
		u64 dev_bytenr_ref_from,
		struct btrfsic_block_link_hashtable *h);
static void btrfsic_dev_state_hashtable_init(
		struct btrfsic_dev_state_hashtable *h);
static void btrfsic_dev_state_hashtable_add(
		struct btrfsic_dev_state *ds,
		struct btrfsic_dev_state_hashtable *h);
static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds);
static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
		struct block_device *bdev,
		struct btrfsic_dev_state_hashtable *h);
static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void);
static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf);
static int btrfsic_process_superblock(struct btrfsic_state *state,
				      struct btrfs_fs_devices *fs_devices);
static int btrfsic_process_metablock(struct btrfsic_state *state,
				     struct btrfsic_block *block,
				     struct btrfsic_block_data_ctx *block_ctx,
				     int limit_nesting, int force_iodone_flag);
static void btrfsic_read_from_block_data(
		struct btrfsic_block_data_ctx *block_ctx,
		void *dst, u32 offset, size_t len);
static int btrfsic_create_link_to_next_block(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u64 next_bytenr,
		int limit_nesting,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block **next_blockp,
		int force_iodone_flag,
		int *num_copiesp, int *mirror_nump,
		struct btrfs_disk_key *disk_key,
		u64 parent_generation);
static int btrfsic_handle_extent_data(struct btrfsic_state *state,
				      struct btrfsic_block *block,
				      struct btrfsic_block_data_ctx *block_ctx,
				      u32 item_offset, int force_iodone_flag);
static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
			     struct btrfsic_block_data_ctx *block_ctx_out,
			     int mirror_num);
static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
static int btrfsic_read_block(struct btrfsic_state *state,
			      struct btrfsic_block_data_ctx *block_ctx);
static void btrfsic_dump_database(struct btrfsic_state *state);
static int btrfsic_test_for_metadata(struct btrfsic_state *state,
				     char **datav, unsigned int num_pages);
static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
					  u64 dev_bytenr, char **mapped_datav,
					  unsigned int num_pages,
					  struct bio *bio, int *bio_is_patched,
					  struct buffer_head *bh,
					  int submit_bio_bh_rw);
static int btrfsic_process_written_superblock(
		struct btrfsic_state *state,
		struct btrfsic_block *const block,
		struct btrfs_super_block *const super_hdr);
static void btrfsic_bio_end_io(struct bio *bp);
static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate);
static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state,
					      const struct btrfsic_block *block,
					      int recursion_level);
static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
					struct btrfsic_block *const block,
					int recursion_level);
static void btrfsic_print_add_link(const struct btrfsic_state *state,
				   const struct btrfsic_block_link *l);
static void btrfsic_print_rem_link(const struct btrfsic_state *state,
				   const struct btrfsic_block_link *l);
static char btrfsic_get_block_type(const struct btrfsic_state *state,
				   const struct btrfsic_block *block);
static void btrfsic_dump_tree(const struct btrfsic_state *state);
static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
				  const struct btrfsic_block *block,
				  int indent_level);
static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
		struct btrfsic_state *state,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block *next_block,
		struct btrfsic_block *from_block,
		u64 parent_generation);
static struct btrfsic_block *btrfsic_block_lookup_or_add(
		struct btrfsic_state *state,
		struct btrfsic_block_data_ctx *block_ctx,
		const char *additional_string,
		int is_metadata,
		int is_iodone,
		int never_written,
		int mirror_num,
		int *was_created);
static int btrfsic_process_superblock_dev_mirror(
		struct btrfsic_state *state,
		struct btrfsic_dev_state *dev_state,
		struct btrfs_device *device,
		int superblock_mirror_num,
		struct btrfsic_dev_state **selected_dev_state,
		struct btrfs_super_block *selected_super);
static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
		struct block_device *bdev);
static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
					   u64 bytenr,
					   struct btrfsic_dev_state *dev_state,
					   u64 dev_bytenr);

static struct mutex btrfsic_mutex;
static int btrfsic_is_initialized;
static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable;


static void btrfsic_block_init(struct btrfsic_block *b)
{
	b->magic_num = BTRFSIC_BLOCK_MAGIC_NUMBER;
	b->dev_state = NULL;
	b->dev_bytenr = 0;
	b->logical_bytenr = 0;
	b->generation = BTRFSIC_GENERATION_UNKNOWN;
	b->disk_key.objectid = 0;
	b->disk_key.type = 0;
	b->disk_key.offset = 0;
	b->is_metadata = 0;
	b->is_superblock = 0;
	b->is_iodone = 0;
	b->iodone_w_error = 0;
	b->never_written = 0;
	b->mirror_num = 0;
	b->next_in_same_bio = NULL;
	b->orig_bio_bh_private = NULL;
	b->orig_bio_bh_end_io.bio = NULL;
	INIT_LIST_HEAD(&b->collision_resolving_node);
	INIT_LIST_HEAD(&b->all_blocks_node);
	INIT_LIST_HEAD(&b->ref_to_list);
	INIT_LIST_HEAD(&b->ref_from_list);
	b->submit_bio_bh_rw = 0;
	b->flush_gen = 0;
}

static struct btrfsic_block *btrfsic_block_alloc(void)
{
	struct btrfsic_block *b;

	b = kzalloc(sizeof(*b), GFP_NOFS);
	if (NULL != b)
		btrfsic_block_init(b);

	return b;
}

static void btrfsic_block_free(struct btrfsic_block *b)
{
	BUG_ON(!(NULL == b || BTRFSIC_BLOCK_MAGIC_NUMBER == b->magic_num));
	kfree(b);
}

static void btrfsic_block_link_init(struct btrfsic_block_link *l)
{
	l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER;
	l->ref_cnt = 1;
	INIT_LIST_HEAD(&l->node_ref_to);
	INIT_LIST_HEAD(&l->node_ref_from);
	INIT_LIST_HEAD(&l->collision_resolving_node);
	l->block_ref_to = NULL;
	l->block_ref_from = NULL;
}

static struct btrfsic_block_link *btrfsic_block_link_alloc(void)
{
	struct btrfsic_block_link *l;

	l = kzalloc(sizeof(*l), GFP_NOFS);
	if (NULL != l)
		btrfsic_block_link_init(l);

	return l;
}

static void btrfsic_block_link_free(struct btrfsic_block_link *l)
{
	BUG_ON(!(NULL == l || BTRFSIC_BLOCK_LINK_MAGIC_NUMBER == l->magic_num));
	kfree(l);
}

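/*
 * Note on the alloc/init/free helpers above and below: every object
 * carries a magic_num that is set on init and verified with BUG_ON()
 * on free; this is purely a debugging aid to catch stray pointers and
 * double frees.
 */
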
static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds)
{
	ds->magic_num = BTRFSIC_DEV2STATE_MAGIC_NUMBER;
	ds->bdev = NULL;
	ds->state = NULL;
	ds->name[0] = '\0';
	INIT_LIST_HEAD(&ds->collision_resolving_node);
	ds->last_flush_gen = 0;
	btrfsic_block_init(&ds->dummy_block_for_bio_bh_flush);
	ds->dummy_block_for_bio_bh_flush.is_iodone = 1;
	ds->dummy_block_for_bio_bh_flush.dev_state = ds;
}

static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void)
{
	struct btrfsic_dev_state *ds;

	ds = kzalloc(sizeof(*ds), GFP_NOFS);
	if (NULL != ds)
		btrfsic_dev_state_init(ds);

	return ds;
}

static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds)
{
	BUG_ON(!(NULL == ds ||
		 BTRFSIC_DEV2STATE_MAGIC_NUMBER == ds->magic_num));
	kfree(ds);
}

static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h)
{
	int i;

	for (i = 0; i < BTRFSIC_BLOCK_HASHTABLE_SIZE; i++)
		INIT_LIST_HEAD(h->table + i);
}

static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
					struct btrfsic_block_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)(b->dev_bytenr >> 16)) ^
	     ((unsigned int)((uintptr_t)b->dev_state->bdev))) &
	    (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);

	list_add(&b->collision_resolving_node, h->table + hashval);
}

static void btrfsic_block_hashtable_remove(struct btrfsic_block *b)
{
	list_del(&b->collision_resolving_node);
}

static struct btrfsic_block *btrfsic_block_hashtable_lookup(
		struct block_device *bdev,
		u64 dev_bytenr,
		struct btrfsic_block_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)(dev_bytenr >> 16)) ^
	     ((unsigned int)((uintptr_t)bdev))) &
	    (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
	struct btrfsic_block *b;

	list_for_each_entry(b, h->table + hashval, collision_resolving_node) {
		if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
			return b;
	}

	return NULL;
}

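/*
 * The hash used by the table above (and, analogously, by the block link
 * and dev state tables below) simply XORs the dev_bytenr shifted right
 * by 16 bits with the block_device pointer value and masks the result
 * with the table size minus one; the table sizes are powers of two, so
 * the mask is equivalent to a modulo, and collisions are resolved by
 * the per-bucket lists.
 */
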
static void btrfsic_block_link_hashtable_init(
		struct btrfsic_block_link_hashtable *h)
{
	int i;

	for (i = 0; i < BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE; i++)
		INIT_LIST_HEAD(h->table + i);
}

static void btrfsic_block_link_hashtable_add(
		struct btrfsic_block_link *l,
		struct btrfsic_block_link_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^
	     ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^
	     ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^
	     ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev)))
	    & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);

	BUG_ON(NULL == l->block_ref_to);
	BUG_ON(NULL == l->block_ref_from);
	list_add(&l->collision_resolving_node, h->table + hashval);
}

static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l)
{
	list_del(&l->collision_resolving_node);
}

static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
		struct block_device *bdev_ref_to,
		u64 dev_bytenr_ref_to,
		struct block_device *bdev_ref_from,
		u64 dev_bytenr_ref_from,
		struct btrfsic_block_link_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)(dev_bytenr_ref_to >> 16)) ^
	     ((unsigned int)(dev_bytenr_ref_from >> 16)) ^
	     ((unsigned int)((uintptr_t)bdev_ref_to)) ^
	     ((unsigned int)((uintptr_t)bdev_ref_from))) &
	    (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
	struct btrfsic_block_link *l;

	list_for_each_entry(l, h->table + hashval, collision_resolving_node) {
		BUG_ON(NULL == l->block_ref_to);
		BUG_ON(NULL == l->block_ref_from);
		if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
		    l->block_ref_to->dev_bytenr == dev_bytenr_ref_to &&
		    l->block_ref_from->dev_state->bdev == bdev_ref_from &&
		    l->block_ref_from->dev_bytenr == dev_bytenr_ref_from)
			return l;
	}

	return NULL;
}

static void btrfsic_dev_state_hashtable_init(
		struct btrfsic_dev_state_hashtable *h)
{
	int i;

	for (i = 0; i < BTRFSIC_DEV2STATE_HASHTABLE_SIZE; i++)
		INIT_LIST_HEAD(h->table + i);
}

static void btrfsic_dev_state_hashtable_add(
		struct btrfsic_dev_state *ds,
		struct btrfsic_dev_state_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)((uintptr_t)ds->bdev)) &
	     (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));

	list_add(&ds->collision_resolving_node, h->table + hashval);
}

static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds)
{
	list_del(&ds->collision_resolving_node);
}

static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
		struct block_device *bdev,
		struct btrfsic_dev_state_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)((uintptr_t)bdev)) &
	     (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
	struct btrfsic_dev_state *ds;

	list_for_each_entry(ds, h->table + hashval, collision_resolving_node) {
		if (ds->bdev == bdev)
			return ds;
	}

	return NULL;
}

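/*
 * The function below seeds the whole in-memory database at mount time:
 * it scans every superblock mirror on every device, remembers the copy
 * with the highest generation, and then walks the root, chunk and log
 * trees it references to record the initially reachable blocks.
 */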
static int btrfsic_process_superblock(struct btrfsic_state *state,
				      struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_info *fs_info = state->fs_info;
	struct btrfs_super_block *selected_super;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfsic_dev_state *selected_dev_state = NULL;
	int ret = 0;
	int pass;

	BUG_ON(NULL == state);
	selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
	if (NULL == selected_super) {
		pr_info("btrfsic: error, kmalloc failed!\n");
		return -ENOMEM;
	}

	list_for_each_entry(device, dev_head, dev_list) {
		int i;
		struct btrfsic_dev_state *dev_state;

		if (!device->bdev || !device->name)
			continue;

		dev_state = btrfsic_dev_state_lookup(device->bdev);
		BUG_ON(NULL == dev_state);
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			ret = btrfsic_process_superblock_dev_mirror(
					state, dev_state, device, i,
					&selected_dev_state, selected_super);
			if (0 != ret && 0 == i) {
				kfree(selected_super);
				return ret;
			}
		}
	}

	if (NULL == state->latest_superblock) {
		pr_info("btrfsic: no superblock found!\n");
		kfree(selected_super);
		return -1;
	}

	state->csum_size = btrfs_super_csum_size(selected_super);

	for (pass = 0; pass < 3; pass++) {
		int num_copies;
		int mirror_num;
		u64 next_bytenr;

		switch (pass) {
		case 0:
			next_bytenr = btrfs_super_root(selected_super);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				pr_info("root@%llu\n", next_bytenr);
			break;
		case 1:
			next_bytenr = btrfs_super_chunk_root(selected_super);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				pr_info("chunk@%llu\n", next_bytenr);
			break;
		case 2:
			next_bytenr = btrfs_super_log_root(selected_super);
			if (0 == next_bytenr)
				continue;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				pr_info("log@%llu\n", next_bytenr);
			break;
		}

		num_copies = btrfs_num_copies(fs_info, next_bytenr,
					      state->metablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			pr_info("num_copies(log_bytenr=%llu) = %d\n",
				next_bytenr, num_copies);

		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block *next_block;
			struct btrfsic_block_data_ctx tmp_next_block_ctx;
			struct btrfsic_block_link *l;

			ret = btrfsic_map_block(state, next_bytenr,
						state->metablock_size,
						&tmp_next_block_ctx,
						mirror_num);
			if (ret) {
				pr_info("btrfsic: btrfsic_map_block(root @%llu, mirror %d) failed!\n",
					next_bytenr, mirror_num);
				kfree(selected_super);
				return -1;
			}

			next_block = btrfsic_block_hashtable_lookup(
					tmp_next_block_ctx.dev->bdev,
					tmp_next_block_ctx.dev_bytenr,
					&state->block_hashtable);
			BUG_ON(NULL == next_block);

			l = btrfsic_block_link_hashtable_lookup(
					tmp_next_block_ctx.dev->bdev,
					tmp_next_block_ctx.dev_bytenr,
					state->latest_superblock->dev_state->bdev,
					state->latest_superblock->dev_bytenr,
					&state->block_link_hashtable);
			BUG_ON(NULL == l);

			ret = btrfsic_read_block(state, &tmp_next_block_ctx);
			if (ret < (int)PAGE_SIZE) {
				pr_info("btrfsic: read @logical %llu failed!\n",
					tmp_next_block_ctx.start);
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				kfree(selected_super);
				return -1;
			}

			ret = btrfsic_process_metablock(state,
							next_block,
							&tmp_next_block_ctx,
							BTRFS_MAX_LEVEL + 3, 1);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
		}
	}

	kfree(selected_super);
	return ret;
}

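/*
 * For reference (encoded in btrfs_sb_offset(), not here): btrfs keeps up
 * to BTRFS_SUPER_MIRROR_MAX superblock copies per device at fixed,
 * unmapped device offsets (64KiB for the primary copy, then 64MiB and
 * 256GiB for the mirrors), as far as the device size allows.
 */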
static int btrfsic_process_superblock_dev_mirror(
		struct btrfsic_state *state,
		struct btrfsic_dev_state *dev_state,
		struct btrfs_device *device,
		int superblock_mirror_num,
		struct btrfsic_dev_state **selected_dev_state,
		struct btrfs_super_block *selected_super)
{
	struct btrfs_fs_info *fs_info = state->fs_info;
	struct btrfs_super_block *super_tmp;
	u64 dev_bytenr;
	struct buffer_head *bh;
	struct btrfsic_block *superblock_tmp;
	int pass;
	struct block_device *const superblock_bdev = device->bdev;

	/* super block bytenr is always the unmapped device bytenr */
	dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
	if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->commit_total_bytes)
		return -1;
	bh = __bread(superblock_bdev, dev_bytenr / 4096,
		     BTRFS_SUPER_INFO_SIZE);
	if (NULL == bh)
		return -1;
	super_tmp = (struct btrfs_super_block *)
	    (bh->b_data + (dev_bytenr & 4095));

	if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
	    btrfs_super_magic(super_tmp) != BTRFS_MAGIC ||
	    memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
	    btrfs_super_nodesize(super_tmp) != state->metablock_size ||
	    btrfs_super_sectorsize(super_tmp) != state->datablock_size) {
		brelse(bh);
		return 0;
	}

	superblock_tmp =
	    btrfsic_block_hashtable_lookup(superblock_bdev,
					   dev_bytenr,
					   &state->block_hashtable);
	if (NULL == superblock_tmp) {
		superblock_tmp = btrfsic_block_alloc();
		if (NULL == superblock_tmp) {
			pr_info("btrfsic: error, kmalloc failed!\n");
			brelse(bh);
			return -1;
		}
		/* for superblock, only the dev_bytenr makes sense */
		superblock_tmp->dev_bytenr = dev_bytenr;
		superblock_tmp->dev_state = dev_state;
		superblock_tmp->logical_bytenr = dev_bytenr;
		superblock_tmp->generation = btrfs_super_generation(super_tmp);
		superblock_tmp->is_metadata = 1;
		superblock_tmp->is_superblock = 1;
		superblock_tmp->is_iodone = 1;
		superblock_tmp->never_written = 0;
		superblock_tmp->mirror_num = 1 + superblock_mirror_num;
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			btrfs_info_in_rcu(fs_info,
				"new initial S-block (bdev %p, %s) @%llu (%s/%llu/%d)",
				superblock_bdev,
				rcu_str_deref(device->name), dev_bytenr,
				dev_state->name, dev_bytenr,
				superblock_mirror_num);
		list_add(&superblock_tmp->all_blocks_node,
			 &state->all_blocks_list);
		btrfsic_block_hashtable_add(superblock_tmp,
					    &state->block_hashtable);
	}

	/* select the one with the highest generation field */
	if (btrfs_super_generation(super_tmp) >
	    state->max_superblock_generation ||
	    0 == state->max_superblock_generation) {
		memcpy(selected_super, super_tmp, sizeof(*selected_super));
		*selected_dev_state = dev_state;
		state->max_superblock_generation =
		    btrfs_super_generation(super_tmp);
		state->latest_superblock = superblock_tmp;
	}

	for (pass = 0; pass < 3; pass++) {
		u64 next_bytenr;
		int num_copies;
		int mirror_num;
		const char *additional_string = NULL;
		struct btrfs_disk_key tmp_disk_key;

		tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_disk_key.offset = 0;
		switch (pass) {
		case 0:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_ROOT_TREE_OBJECTID);
			additional_string = "initial root ";
			next_bytenr = btrfs_super_root(super_tmp);
			break;
		case 1:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_CHUNK_TREE_OBJECTID);
			additional_string = "initial chunk ";
			next_bytenr = btrfs_super_chunk_root(super_tmp);
			break;
		case 2:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_TREE_LOG_OBJECTID);
			additional_string = "initial log ";
			next_bytenr = btrfs_super_log_root(super_tmp);
			if (0 == next_bytenr)
				continue;
			break;
		}

		num_copies = btrfs_num_copies(fs_info, next_bytenr,
					      state->metablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			pr_info("num_copies(log_bytenr=%llu) = %d\n",
				next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block *next_block;
			struct btrfsic_block_data_ctx tmp_next_block_ctx;
			struct btrfsic_block_link *l;

			if (btrfsic_map_block(state, next_bytenr,
					      state->metablock_size,
					      &tmp_next_block_ctx,
					      mirror_num)) {
				pr_info("btrfsic: btrfsic_map_block(bytenr @%llu, mirror %d) failed!\n",
					next_bytenr, mirror_num);
				brelse(bh);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state, &tmp_next_block_ctx,
					additional_string, 1, 1, 0,
					mirror_num, NULL);
			if (NULL == next_block) {
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				brelse(bh);
				return -1;
			}

			next_block->disk_key = tmp_disk_key;
			next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
			l = btrfsic_block_link_lookup_or_add(
					state, &tmp_next_block_ctx,
					next_block, superblock_tmp,
					BTRFSIC_GENERATION_UNKNOWN);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
			if (NULL == l) {
				brelse(bh);
				return -1;
			}
		}
	}
	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES)
		btrfsic_dump_tree_sub(state, superblock_tmp, 0);

	brelse(bh);
	return 0;
}

static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void)
{
	struct btrfsic_stack_frame *sf;

	sf = kzalloc(sizeof(*sf), GFP_NOFS);
	if (NULL == sf)
		pr_info("btrfsic: alloc memory failed!\n");
	else
		sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
	return sf;
}

static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf)
{
	BUG_ON(!(NULL == sf ||
		 BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER == sf->magic));
	kfree(sf);
}

static int btrfsic_process_metablock(
		struct btrfsic_state *state,
		struct btrfsic_block *const first_block,
		struct btrfsic_block_data_ctx *const first_block_ctx,
		int first_limit_nesting, int force_iodone_flag)
{
	struct btrfsic_stack_frame initial_stack_frame = { 0 };
	struct btrfsic_stack_frame *sf;
	struct btrfsic_stack_frame *next_stack;
	struct btrfs_header *const first_hdr =
		(struct btrfs_header *)first_block_ctx->datav[0];

	BUG_ON(!first_hdr);
	sf = &initial_stack_frame;
	sf->error = 0;
	sf->i = -1;
	sf->limit_nesting = first_limit_nesting;
	sf->block = first_block;
	sf->block_ctx = first_block_ctx;
	sf->next_block = NULL;
	sf->hdr = first_hdr;
	sf->prev = NULL;

continue_with_new_stack_frame:
	sf->block->generation = le64_to_cpu(sf->hdr->generation);
	if (0 == sf->hdr->level) {
		struct btrfs_leaf *const leafhdr =
		    (struct btrfs_leaf *)sf->hdr;

		if (-1 == sf->i) {
			sf->nr = btrfs_stack_header_nritems(&leafhdr->header);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				pr_info("leaf %llu items %d generation %llu owner %llu\n",
					sf->block_ctx->start, sf->nr,
					btrfs_stack_header_generation(
						&leafhdr->header),
					btrfs_stack_header_owner(
						&leafhdr->header));
		}

continue_with_current_leaf_stack_frame:
		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
			sf->i++;
			sf->num_copies = 0;
		}

		if (sf->i < sf->nr) {
			struct btrfs_item disk_item;
			u32 disk_item_offset =
			    (uintptr_t)(leafhdr->items + sf->i) -
			    (uintptr_t)leafhdr;
			struct btrfs_disk_key *disk_key;
			u8 type;
			u32 item_offset;
			u32 item_size;

			if (disk_item_offset + sizeof(struct btrfs_item) >
			    sf->block_ctx->len) {
leaf_item_out_of_bounds_error:
				pr_info("btrfsic: leaf item out of bounds at logical %llu, dev %s\n",
					sf->block_ctx->start,
					sf->block_ctx->dev->name);
				goto one_stack_frame_backwards;
			}
			btrfsic_read_from_block_data(sf->block_ctx,
						     &disk_item,
						     disk_item_offset,
						     sizeof(struct btrfs_item));
			item_offset = btrfs_stack_item_offset(&disk_item);
			item_size = btrfs_stack_item_size(&disk_item);
			disk_key = &disk_item.key;
			type = btrfs_disk_key_type(disk_key);

			if (BTRFS_ROOT_ITEM_KEY == type) {
				struct btrfs_root_item root_item;
				u32 root_item_offset;
				u64 next_bytenr;

				root_item_offset = item_offset +
					offsetof(struct btrfs_leaf, items);
				if (root_item_offset + item_size >
				    sf->block_ctx->len)
					goto leaf_item_out_of_bounds_error;
				btrfsic_read_from_block_data(
					sf->block_ctx, &root_item,
					root_item_offset,
					item_size);
				next_bytenr = btrfs_root_bytenr(&root_item);

				sf->error =
				    btrfsic_create_link_to_next_block(
						state,
						sf->block,
						sf->block_ctx,
						next_bytenr,
						sf->limit_nesting,
						&sf->next_block_ctx,
						&sf->next_block,
						force_iodone_flag,
						&sf->num_copies,
						&sf->mirror_num,
						disk_key,
						btrfs_root_generation(
						&root_item));
				if (sf->error)
					goto one_stack_frame_backwards;

				if (NULL != sf->next_block) {
					struct btrfs_header *const next_hdr =
					    (struct btrfs_header *)
					    sf->next_block_ctx.datav[0];

					next_stack =
					    btrfsic_stack_frame_alloc();
					if (NULL == next_stack) {
						sf->error = -1;
						btrfsic_release_block_ctx(
							&sf->next_block_ctx);
						goto one_stack_frame_backwards;
					}

					next_stack->i = -1;
					next_stack->block = sf->next_block;
					next_stack->block_ctx =
					    &sf->next_block_ctx;
					next_stack->next_block = NULL;
					next_stack->hdr = next_hdr;
					next_stack->limit_nesting =
					    sf->limit_nesting - 1;
					next_stack->prev = sf;
					sf = next_stack;
					goto continue_with_new_stack_frame;
				}
			} else if (BTRFS_EXTENT_DATA_KEY == type &&
				   state->include_extent_data) {
				sf->error = btrfsic_handle_extent_data(
						state,
						sf->block,
						sf->block_ctx,
						item_offset,
						force_iodone_flag);
				if (sf->error)
					goto one_stack_frame_backwards;
			}

			goto continue_with_current_leaf_stack_frame;
		}
	} else {
		struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr;

		if (-1 == sf->i) {
			sf->nr = btrfs_stack_header_nritems(&nodehdr->header);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				pr_info("node %llu level %d items %d generation %llu owner %llu\n",
					sf->block_ctx->start,
					nodehdr->header.level, sf->nr,
					btrfs_stack_header_generation(
						&nodehdr->header),
					btrfs_stack_header_owner(
						&nodehdr->header));
		}

continue_with_current_node_stack_frame:
		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
			sf->i++;
			sf->num_copies = 0;
		}

		if (sf->i < sf->nr) {
			struct btrfs_key_ptr key_ptr;
			u32 key_ptr_offset;
			u64 next_bytenr;

			key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) -
					 (uintptr_t)nodehdr;
			if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
			    sf->block_ctx->len) {
				pr_info("btrfsic: node item out of bounds at logical %llu, dev %s\n",
					sf->block_ctx->start,
					sf->block_ctx->dev->name);
				goto one_stack_frame_backwards;
			}
			btrfsic_read_from_block_data(
				sf->block_ctx, &key_ptr, key_ptr_offset,
				sizeof(struct btrfs_key_ptr));
			next_bytenr = btrfs_stack_key_blockptr(&key_ptr);

			sf->error = btrfsic_create_link_to_next_block(
					state,
					sf->block,
					sf->block_ctx,
					next_bytenr,
					sf->limit_nesting,
					&sf->next_block_ctx,
					&sf->next_block,
					force_iodone_flag,
					&sf->num_copies,
					&sf->mirror_num,
					&key_ptr.key,
					btrfs_stack_key_generation(&key_ptr));
			if (sf->error)
				goto one_stack_frame_backwards;

			if (NULL != sf->next_block) {
				struct btrfs_header *const next_hdr =
				    (struct btrfs_header *)
				    sf->next_block_ctx.datav[0];

				next_stack = btrfsic_stack_frame_alloc();
				if (NULL == next_stack) {
					sf->error = -1;
					goto one_stack_frame_backwards;
				}

				next_stack->i = -1;
				next_stack->block = sf->next_block;
				next_stack->block_ctx = &sf->next_block_ctx;
				next_stack->next_block = NULL;
				next_stack->hdr = next_hdr;
				next_stack->limit_nesting =
				    sf->limit_nesting - 1;
				next_stack->prev = sf;
				sf = next_stack;
				goto continue_with_new_stack_frame;
			}

			goto continue_with_current_node_stack_frame;
		}
	}

one_stack_frame_backwards:
	if (NULL != sf->prev) {
		struct btrfsic_stack_frame *const prev = sf->prev;

		/* the one for the initial block is freed in the caller */
		btrfsic_release_block_ctx(sf->block_ctx);

		if (sf->error) {
			prev->error = sf->error;
			btrfsic_stack_frame_free(sf);
			sf = prev;
			goto one_stack_frame_backwards;
		}

		btrfsic_stack_frame_free(sf);
		sf = prev;
		goto continue_with_new_stack_frame;
	} else {
		BUG_ON(&initial_stack_frame != sf);
	}

	return sf->error;
}

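/*
 * The helper below copies an arbitrary (offset, len) range out of the
 * per-page buffers of a block context; a separate helper is needed
 * because items read from a tree block may straddle page boundaries.
 */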
static void btrfsic_read_from_block_data(
	struct btrfsic_block_data_ctx *block_ctx,
	void *dstv, u32 offset, size_t len)
{
	size_t cur;
	size_t offset_in_page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = block_ctx->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + offset) >> PAGE_SHIFT;

	WARN_ON(offset + len > block_ctx->len);
	offset_in_page = (start_offset + offset) & (PAGE_SIZE - 1);

	while (len > 0) {
		cur = min(len, ((size_t)PAGE_SIZE - offset_in_page));
		BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE));
		kaddr = block_ctx->datav[i];
		memcpy(dst, kaddr + offset_in_page, cur);

		dst += cur;
		len -= cur;
		offset_in_page = 0;
		i++;
	}
}

static int btrfsic_create_link_to_next_block(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u64 next_bytenr,
		int limit_nesting,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block **next_blockp,
		int force_iodone_flag,
		int *num_copiesp, int *mirror_nump,
		struct btrfs_disk_key *disk_key,
		u64 parent_generation)
{
	struct btrfs_fs_info *fs_info = state->fs_info;
	struct btrfsic_block *next_block = NULL;
	int ret;
	struct btrfsic_block_link *l;
	int did_alloc_block_link;
	int block_was_created;

	*next_blockp = NULL;
	if (0 == *num_copiesp) {
		*num_copiesp = btrfs_num_copies(fs_info, next_bytenr,
						state->metablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			pr_info("num_copies(log_bytenr=%llu) = %d\n",
				next_bytenr, *num_copiesp);
		*mirror_nump = 1;
	}

	if (*mirror_nump > *num_copiesp)
		return 0;

	if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
		pr_info("btrfsic_create_link_to_next_block(mirror_num=%d)\n",
			*mirror_nump);
	ret = btrfsic_map_block(state, next_bytenr,
				state->metablock_size,
				next_block_ctx, *mirror_nump);
	if (ret) {
		pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
			next_bytenr, *mirror_nump);
		btrfsic_release_block_ctx(next_block_ctx);
		*next_blockp = NULL;
		return -1;
	}

	next_block = btrfsic_block_lookup_or_add(state,
						 next_block_ctx, "referenced ",
						 1, force_iodone_flag,
						 !force_iodone_flag,
						 *mirror_nump,
						 &block_was_created);
	if (NULL == next_block) {
		btrfsic_release_block_ctx(next_block_ctx);
		*next_blockp = NULL;
		return -1;
	}
	if (block_was_created) {
		l = NULL;
		next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
	} else {
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) {
			if (next_block->logical_bytenr != next_bytenr &&
			    !(!next_block->is_metadata &&
			      0 == next_block->logical_bytenr))
				pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
					next_bytenr, next_block_ctx->dev->name,
					next_block_ctx->dev_bytenr, *mirror_nump,
					btrfsic_get_block_type(state,
							       next_block),
					next_block->logical_bytenr);
			else
				pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, %c.\n",
					next_bytenr, next_block_ctx->dev->name,
					next_block_ctx->dev_bytenr, *mirror_nump,
					btrfsic_get_block_type(state,
							       next_block));
		}
		next_block->logical_bytenr = next_bytenr;

		next_block->mirror_num = *mirror_nump;
		l = btrfsic_block_link_hashtable_lookup(
				next_block_ctx->dev->bdev,
				next_block_ctx->dev_bytenr,
				block_ctx->dev->bdev,
				block_ctx->dev_bytenr,
				&state->block_link_hashtable);
	}

	next_block->disk_key = *disk_key;
	if (NULL == l) {
		l = btrfsic_block_link_alloc();
		if (NULL == l) {
			pr_info("btrfsic: error, kmalloc failed!\n");
			btrfsic_release_block_ctx(next_block_ctx);
			*next_blockp = NULL;
			return -1;
		}

		did_alloc_block_link = 1;
		l->block_ref_to = next_block;
		l->block_ref_from = block;
		l->ref_cnt = 1;
		l->parent_generation = parent_generation;

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			btrfsic_print_add_link(state, l);

		list_add(&l->node_ref_to, &block->ref_to_list);
		list_add(&l->node_ref_from, &next_block->ref_from_list);

		btrfsic_block_link_hashtable_add(l,
						 &state->block_link_hashtable);
	} else {
		did_alloc_block_link = 0;
		if (0 == limit_nesting) {
			l->ref_cnt++;
			l->parent_generation = parent_generation;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_add_link(state, l);
		}
	}

	if (limit_nesting > 0 && did_alloc_block_link) {
		ret = btrfsic_read_block(state, next_block_ctx);
		if (ret < (int)next_block_ctx->len) {
			pr_info("btrfsic: read block @logical %llu failed!\n",
				next_bytenr);
			btrfsic_release_block_ctx(next_block_ctx);
			*next_blockp = NULL;
			return -1;
		}

		*next_blockp = next_block;
	} else {
		*next_blockp = NULL;
	}
	(*mirror_nump)++;

	return 0;
}

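/*
 * The handler below follows a regular (non-inline) file extent item:
 * the referenced data range is split into chunks of at most
 * state->datablock_size bytes, and every chunk is recorded once per
 * mirror so that later overwrites of any copy can be matched up.
 */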
static int btrfsic_handle_extent_data(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u32 item_offset, int force_iodone_flag)
{
	struct btrfs_fs_info *fs_info = state->fs_info;
	struct btrfs_file_extent_item file_extent_item;
	u64 file_extent_item_offset;
	u64 next_bytenr;
	u64 num_bytes;
	u64 generation;
	struct btrfsic_block_link *l;
	int ret;

	file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
				  item_offset;
	if (file_extent_item_offset +
	    offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
	    block_ctx->len) {
		pr_info("btrfsic: file item out of bounds at logical %llu, dev %s\n",
			block_ctx->start, block_ctx->dev->name);
		return -1;
	}

	btrfsic_read_from_block_data(block_ctx, &file_extent_item,
		file_extent_item_offset,
		offsetof(struct btrfs_file_extent_item, disk_num_bytes));
	if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
	    btrfs_stack_file_extent_disk_bytenr(&file_extent_item) == 0) {
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
			pr_info("extent_data: type %u, disk_bytenr = %llu\n",
				file_extent_item.type,
				btrfs_stack_file_extent_disk_bytenr(
					&file_extent_item));
		return 0;
	}

	if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
	    block_ctx->len) {
		pr_info("btrfsic: file item out of bounds at logical %llu, dev %s\n",
			block_ctx->start, block_ctx->dev->name);
		return -1;
	}
	btrfsic_read_from_block_data(block_ctx, &file_extent_item,
				     file_extent_item_offset,
				     sizeof(struct btrfs_file_extent_item));
	next_bytenr = btrfs_stack_file_extent_disk_bytenr(&file_extent_item);
	if (btrfs_stack_file_extent_compression(&file_extent_item) ==
	    BTRFS_COMPRESS_NONE) {
		next_bytenr += btrfs_stack_file_extent_offset(&file_extent_item);
		num_bytes = btrfs_stack_file_extent_num_bytes(&file_extent_item);
	} else {
		num_bytes = btrfs_stack_file_extent_disk_num_bytes(&file_extent_item);
	}
	generation = btrfs_stack_file_extent_generation(&file_extent_item);

	if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
		pr_info("extent_data: type %u, disk_bytenr = %llu, offset = %llu, num_bytes = %llu\n",
			file_extent_item.type,
			btrfs_stack_file_extent_disk_bytenr(&file_extent_item),
			btrfs_stack_file_extent_offset(&file_extent_item),
			num_bytes);
	while (num_bytes > 0) {
		u32 chunk_len;
		int num_copies;
		int mirror_num;

		if (num_bytes > state->datablock_size)
			chunk_len = state->datablock_size;
		else
			chunk_len = num_bytes;

		num_copies = btrfs_num_copies(fs_info, next_bytenr,
					      state->datablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			pr_info("num_copies(log_bytenr=%llu) = %d\n",
				next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block_data_ctx next_block_ctx;
			struct btrfsic_block *next_block;
			int block_was_created;

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				pr_info("btrfsic_handle_extent_data(mirror_num=%d)\n",
					mirror_num);
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
				pr_info("\tdisk_bytenr = %llu, num_bytes %u\n",
					next_bytenr, chunk_len);
			ret = btrfsic_map_block(state, next_bytenr,
						chunk_len, &next_block_ctx,
						mirror_num);
			if (ret) {
				pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
					next_bytenr, mirror_num);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state,
					&next_block_ctx,
					"referenced ",
					0,
					force_iodone_flag,
					!force_iodone_flag,
					mirror_num,
					&block_was_created);
			if (NULL == next_block) {
				pr_info("btrfsic: error, kmalloc failed!\n");
				btrfsic_release_block_ctx(&next_block_ctx);
				return -1;
			}
			if (!block_was_created) {
				if ((state->print_mask &
				     BTRFSIC_PRINT_MASK_VERBOSE) &&
				    next_block->logical_bytenr != next_bytenr &&
				    !(!next_block->is_metadata &&
				      0 == next_block->logical_bytenr)) {
					pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, D, bytenr mismatch (!= stored %llu).\n",
						next_bytenr,
						next_block_ctx.dev->name,
						next_block_ctx.dev_bytenr,
						mirror_num,
						next_block->logical_bytenr);
				}
				next_block->logical_bytenr = next_bytenr;
				next_block->mirror_num = mirror_num;
			}

			l = btrfsic_block_link_lookup_or_add(state,
							     &next_block_ctx,
							     next_block, block,
							     generation);
			btrfsic_release_block_ctx(&next_block_ctx);
			if (NULL == l)
				return -1;
		}

		next_bytenr += chunk_len;
		num_bytes -= chunk_len;
	}

	return 0;
}

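/*
 * Map a logical byte number to its physical location on one particular
 * mirror: a thin wrapper around btrfs_map_block() that only looks at
 * the first stripe of the resulting mapping, since exactly one mirror
 * was requested.
 */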
static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
			     struct btrfsic_block_data_ctx *block_ctx_out,
			     int mirror_num)
{
	struct btrfs_fs_info *fs_info = state->fs_info;
	int ret;
	u64 length;
	struct btrfs_bio *multi = NULL;
	struct btrfs_device *device;

	length = len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
			      bytenr, &length, &multi, mirror_num);

	if (ret) {
		block_ctx_out->start = 0;
		block_ctx_out->dev_bytenr = 0;
		block_ctx_out->len = 0;
		block_ctx_out->dev = NULL;
		block_ctx_out->datav = NULL;
		block_ctx_out->pagev = NULL;
		block_ctx_out->mem_to_free = NULL;

		return ret;
	}

	device = multi->stripes[0].dev;
	block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev);
	block_ctx_out->dev_bytenr = multi->stripes[0].physical;
	block_ctx_out->start = bytenr;
	block_ctx_out->len = len;
	block_ctx_out->datav = NULL;
	block_ctx_out->pagev = NULL;
	block_ctx_out->mem_to_free = NULL;

	kfree(multi);
	if (NULL == block_ctx_out->dev) {
		ret = -ENXIO;
		pr_info("btrfsic: error, cannot lookup dev (#1)!\n");
	}

	return ret;
}

static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
{
	if (block_ctx->mem_to_free) {
		unsigned int num_pages;

		BUG_ON(!block_ctx->datav);
		BUG_ON(!block_ctx->pagev);
		num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
			    PAGE_SHIFT;
		while (num_pages > 0) {
			num_pages--;
			if (block_ctx->datav[num_pages]) {
				kunmap(block_ctx->pagev[num_pages]);
				block_ctx->datav[num_pages] = NULL;
			}
			if (block_ctx->pagev[num_pages]) {
				__free_page(block_ctx->pagev[num_pages]);
				block_ctx->pagev[num_pages] = NULL;
			}
		}

		kfree(block_ctx->mem_to_free);
		block_ctx->mem_to_free = NULL;
		block_ctx->pagev = NULL;
		block_ctx->datav = NULL;
	}
}

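/*
 * Read a block into freshly allocated pages. One allocation serves both
 * the datav (kmapped addresses) and pagev (page pointers) arrays; that
 * single buffer is what mem_to_free points to, which is why the release
 * helper above needs only one kfree().
 */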
static int btrfsic_read_block(struct btrfsic_state *state,
			      struct btrfsic_block_data_ctx *block_ctx)
{
	unsigned int num_pages;
	unsigned int i;
	u64 dev_bytenr;
	int ret;

	BUG_ON(block_ctx->datav);
	BUG_ON(block_ctx->pagev);
	BUG_ON(block_ctx->mem_to_free);
	if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) {
		pr_info("btrfsic: read_block() with unaligned bytenr %llu\n",
			block_ctx->dev_bytenr);
		return -1;
	}

	num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
		    PAGE_SHIFT;
	block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
					  sizeof(*block_ctx->pagev)) *
					 num_pages, GFP_NOFS);
	if (!block_ctx->mem_to_free)
		return -ENOMEM;
	block_ctx->datav = block_ctx->mem_to_free;
	block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
	for (i = 0; i < num_pages; i++) {
		block_ctx->pagev[i] = alloc_page(GFP_NOFS);
		if (!block_ctx->pagev[i])
			return -1;
	}

	dev_bytenr = block_ctx->dev_bytenr;
	for (i = 0; i < num_pages;) {
		struct bio *bio;
		unsigned int j;

		bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
		if (!bio) {
			pr_info("btrfsic: bio_alloc() for %u pages failed!\n",
				num_pages - i);
			return -1;
		}
		bio->bi_bdev = block_ctx->dev->bdev;
		bio->bi_iter.bi_sector = dev_bytenr >> 9;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		for (j = i; j < num_pages; j++) {
			ret = bio_add_page(bio, block_ctx->pagev[j],
					   PAGE_SIZE, 0);
			if (PAGE_SIZE != ret)
				break;
		}
		if (j == i) {
			pr_info("btrfsic: error, failed to add a single page!\n");
			return -1;
		}
		if (submit_bio_wait(bio)) {
			pr_info("btrfsic: read error at logical %llu dev %s!\n",
				block_ctx->start, block_ctx->dev->name);
			bio_put(bio);
			return -1;
		}
		bio_put(bio);
		dev_bytenr += (j - i) * PAGE_SIZE;
		i = j;
	}
	for (i = 0; i < num_pages; i++) {
		block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
		if (!block_ctx->datav[i]) {
			pr_info("btrfsic: kmap() failed (dev %s)!\n",
				block_ctx->dev->name);
			return -1;
		}
	}

	return block_ctx->len;
}

static void btrfsic_dump_database(struct btrfsic_state *state)
{
	const struct btrfsic_block *b_all;

	BUG_ON(NULL == state);

	pr_info("all_blocks_list:\n");
	list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
		const struct btrfsic_block_link *l;

		pr_info("%c-block @%llu (%s/%llu/%d)\n",
			btrfsic_get_block_type(state, b_all),
			b_all->logical_bytenr, b_all->dev_state->name,
			b_all->dev_bytenr, b_all->mirror_num);

		list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
			pr_info(" %c @%llu (%s/%llu/%d) refers %u* to %c @%llu (%s/%llu/%d)\n",
				btrfsic_get_block_type(state, b_all),
				b_all->logical_bytenr, b_all->dev_state->name,
				b_all->dev_bytenr, b_all->mirror_num,
				l->ref_cnt,
				btrfsic_get_block_type(state, l->block_ref_to),
				l->block_ref_to->logical_bytenr,
				l->block_ref_to->dev_state->name,
				l->block_ref_to->dev_bytenr,
				l->block_ref_to->mirror_num);
		}

		list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
			pr_info(" %c @%llu (%s/%llu/%d) is ref %u* from %c @%llu (%s/%llu/%d)\n",
				btrfsic_get_block_type(state, b_all),
				b_all->logical_bytenr, b_all->dev_state->name,
				b_all->dev_bytenr, b_all->mirror_num,
				l->ref_cnt,
				btrfsic_get_block_type(state, l->block_ref_from),
				l->block_ref_from->logical_bytenr,
				l->block_ref_from->dev_state->name,
				l->block_ref_from->dev_bytenr,
				l->block_ref_from->mirror_num);
		}

		pr_info("\n");
	}
}

/*
 * Test whether the disk block contains a tree block (leaf or node)
 * (note that this test fails for the super block)
 */
static int btrfsic_test_for_metadata(struct btrfsic_state *state,
				     char **datav, unsigned int num_pages)
{
	struct btrfs_fs_info *fs_info = state->fs_info;
	struct btrfs_header *h;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	unsigned int i;

	if (num_pages * PAGE_SIZE < state->metablock_size)
		return 1;	/* not metadata */
	num_pages = state->metablock_size >> PAGE_SHIFT;
	h = (struct btrfs_header *)datav[0];

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		return 1;

	for (i = 0; i < num_pages; i++) {
		u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
		size_t sublen = i ? PAGE_SIZE :
				    (PAGE_SIZE - BTRFS_CSUM_SIZE);

		crc = btrfs_crc32c(crc, data, sublen);
	}
	btrfs_csum_final(crc, csum);
	if (memcmp(csum, h->csum, state->csum_size))
		return 1;

	return 0;	/* is metadata */
}

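/*
 * Note on the check above: the first BTRFS_CSUM_SIZE bytes of a tree
 * block hold the on-disk checksum itself, so the crc32c is computed
 * over everything following them and then compared against h->csum;
 * a mismatch (or a foreign fsid) means the buffer is treated as data.
 */
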
PAGE_SIZE : 1752 (PAGE_SIZE - BTRFS_CSUM_SIZE); 1753 1754 crc = btrfs_crc32c(crc, data, sublen); 1755 } 1756 btrfs_csum_final(crc, csum); 1757 if (memcmp(csum, h->csum, state->csum_size)) 1758 return 1; 1759 1760 return 0; /* is metadata */ 1761 } 1762 1763 static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, 1764 u64 dev_bytenr, char **mapped_datav, 1765 unsigned int num_pages, 1766 struct bio *bio, int *bio_is_patched, 1767 struct buffer_head *bh, 1768 int submit_bio_bh_rw) 1769 { 1770 int is_metadata; 1771 struct btrfsic_block *block; 1772 struct btrfsic_block_data_ctx block_ctx; 1773 int ret; 1774 struct btrfsic_state *state = dev_state->state; 1775 struct block_device *bdev = dev_state->bdev; 1776 unsigned int processed_len; 1777 1778 if (NULL != bio_is_patched) 1779 *bio_is_patched = 0; 1780 1781 again: 1782 if (num_pages == 0) 1783 return; 1784 1785 processed_len = 0; 1786 is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav, 1787 num_pages)); 1788 1789 block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr, 1790 &state->block_hashtable); 1791 if (NULL != block) { 1792 u64 bytenr = 0; 1793 struct btrfsic_block_link *l, *tmp; 1794 1795 if (block->is_superblock) { 1796 bytenr = btrfs_super_bytenr((struct btrfs_super_block *) 1797 mapped_datav[0]); 1798 if (num_pages * PAGE_SIZE < 1799 BTRFS_SUPER_INFO_SIZE) { 1800 pr_info("btrfsic: cannot work with too short bios!\n"); 1801 return; 1802 } 1803 is_metadata = 1; 1804 BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_SIZE - 1)); 1805 processed_len = BTRFS_SUPER_INFO_SIZE; 1806 if (state->print_mask & 1807 BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) { 1808 pr_info("[before new superblock is written]:\n"); 1809 btrfsic_dump_tree_sub(state, block, 0); 1810 } 1811 } 1812 if (is_metadata) { 1813 if (!block->is_superblock) { 1814 if (num_pages * PAGE_SIZE < 1815 state->metablock_size) { 1816 pr_info("btrfsic: cannot work with too short bios!\n"); 1817 return; 1818 } 1819 processed_len = state->metablock_size; 1820 bytenr = btrfs_stack_header_bytenr( 1821 (struct btrfs_header *) 1822 mapped_datav[0]); 1823 btrfsic_cmp_log_and_dev_bytenr(state, bytenr, 1824 dev_state, 1825 dev_bytenr); 1826 } 1827 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) { 1828 if (block->logical_bytenr != bytenr && 1829 !(!block->is_metadata && 1830 block->logical_bytenr == 0)) 1831 pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n", 1832 bytenr, dev_state->name, 1833 dev_bytenr, 1834 block->mirror_num, 1835 btrfsic_get_block_type(state, 1836 block), 1837 block->logical_bytenr); 1838 else 1839 pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n", 1840 bytenr, dev_state->name, 1841 dev_bytenr, block->mirror_num, 1842 btrfsic_get_block_type(state, 1843 block)); 1844 } 1845 block->logical_bytenr = bytenr; 1846 } else { 1847 if (num_pages * PAGE_SIZE < 1848 state->datablock_size) { 1849 pr_info("btrfsic: cannot work with too short bios!\n"); 1850 return; 1851 } 1852 processed_len = state->datablock_size; 1853 bytenr = block->logical_bytenr; 1854 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1855 pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n", 1856 bytenr, dev_state->name, dev_bytenr, 1857 block->mirror_num, 1858 btrfsic_get_block_type(state, block)); 1859 } 1860 1861 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1862 pr_info("ref_to_list: %cE, ref_from_list: %cE\n", 1863 list_empty(&block->ref_to_list) ? 
' ' : '!', 1864 list_empty(&block->ref_from_list) ? ' ' : '!'); 1865 if (btrfsic_is_block_ref_by_superblock(state, block, 0)) { 1866 pr_info("btrfs: attempt to overwrite %c-block @%llu (%s/%llu/%d), old(gen=%llu, objectid=%llu, type=%d, offset=%llu), new(gen=%llu), which is referenced by most recent superblock (superblockgen=%llu)!\n", 1867 btrfsic_get_block_type(state, block), bytenr, 1868 dev_state->name, dev_bytenr, block->mirror_num, 1869 block->generation, 1870 btrfs_disk_key_objectid(&block->disk_key), 1871 block->disk_key.type, 1872 btrfs_disk_key_offset(&block->disk_key), 1873 btrfs_stack_header_generation( 1874 (struct btrfs_header *) mapped_datav[0]), 1875 state->max_superblock_generation); 1876 btrfsic_dump_tree(state); 1877 } 1878 1879 if (!block->is_iodone && !block->never_written) { 1880 pr_info("btrfs: attempt to overwrite %c-block @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu, which is not yet iodone!\n", 1881 btrfsic_get_block_type(state, block), bytenr, 1882 dev_state->name, dev_bytenr, block->mirror_num, 1883 block->generation, 1884 btrfs_stack_header_generation( 1885 (struct btrfs_header *) 1886 mapped_datav[0])); 1887 /* it would not be safe to go on */ 1888 btrfsic_dump_tree(state); 1889 goto continue_loop; 1890 } 1891 1892 /* 1893 * Clear all references of this block. Do not free 1894 * the block itself even if it is not referenced anymore 1895 * because it still carries valuable information 1896 * like whether it was ever written and IO completed. 1897 */ 1898 list_for_each_entry_safe(l, tmp, &block->ref_to_list, 1899 node_ref_to) { 1900 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1901 btrfsic_print_rem_link(state, l); 1902 l->ref_cnt--; 1903 if (0 == l->ref_cnt) { 1904 list_del(&l->node_ref_to); 1905 list_del(&l->node_ref_from); 1906 btrfsic_block_link_hashtable_remove(l); 1907 btrfsic_block_link_free(l); 1908 } 1909 } 1910 1911 block_ctx.dev = dev_state; 1912 block_ctx.dev_bytenr = dev_bytenr; 1913 block_ctx.start = bytenr; 1914 block_ctx.len = processed_len; 1915 block_ctx.pagev = NULL; 1916 block_ctx.mem_to_free = NULL; 1917 block_ctx.datav = mapped_datav; 1918 1919 if (is_metadata || state->include_extent_data) { 1920 block->never_written = 0; 1921 block->iodone_w_error = 0; 1922 if (NULL != bio) { 1923 block->is_iodone = 0; 1924 BUG_ON(NULL == bio_is_patched); 1925 if (!*bio_is_patched) { 1926 block->orig_bio_bh_private = 1927 bio->bi_private; 1928 block->orig_bio_bh_end_io.bio = 1929 bio->bi_end_io; 1930 block->next_in_same_bio = NULL; 1931 bio->bi_private = block; 1932 bio->bi_end_io = btrfsic_bio_end_io; 1933 *bio_is_patched = 1; 1934 } else { 1935 struct btrfsic_block *chained_block = 1936 (struct btrfsic_block *) 1937 bio->bi_private; 1938 1939 BUG_ON(NULL == chained_block); 1940 block->orig_bio_bh_private = 1941 chained_block->orig_bio_bh_private; 1942 block->orig_bio_bh_end_io.bio = 1943 chained_block->orig_bio_bh_end_io.
1944 bio; 1945 block->next_in_same_bio = chained_block; 1946 bio->bi_private = block; 1947 } 1948 } else if (NULL != bh) { 1949 block->is_iodone = 0; 1950 block->orig_bio_bh_private = bh->b_private; 1951 block->orig_bio_bh_end_io.bh = bh->b_end_io; 1952 block->next_in_same_bio = NULL; 1953 bh->b_private = block; 1954 bh->b_end_io = btrfsic_bh_end_io; 1955 } else { 1956 block->is_iodone = 1; 1957 block->orig_bio_bh_private = NULL; 1958 block->orig_bio_bh_end_io.bio = NULL; 1959 block->next_in_same_bio = NULL; 1960 } 1961 } 1962 1963 block->flush_gen = dev_state->last_flush_gen + 1; 1964 block->submit_bio_bh_rw = submit_bio_bh_rw; 1965 if (is_metadata) { 1966 block->logical_bytenr = bytenr; 1967 block->is_metadata = 1; 1968 if (block->is_superblock) { 1969 BUG_ON(PAGE_SIZE != 1970 BTRFS_SUPER_INFO_SIZE); 1971 ret = btrfsic_process_written_superblock( 1972 state, 1973 block, 1974 (struct btrfs_super_block *) 1975 mapped_datav[0]); 1976 if (state->print_mask & 1977 BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) { 1978 pr_info("[after new superblock is written]:\n"); 1979 btrfsic_dump_tree_sub(state, block, 0); 1980 } 1981 } else { 1982 block->mirror_num = 0; /* unknown */ 1983 ret = btrfsic_process_metablock( 1984 state, 1985 block, 1986 &block_ctx, 1987 0, 0); 1988 } 1989 if (ret) 1990 pr_info("btrfsic: btrfsic_process_metablock(root @%llu) failed!\n", 1991 dev_bytenr); 1992 } else { 1993 block->is_metadata = 0; 1994 block->mirror_num = 0; /* unknown */ 1995 block->generation = BTRFSIC_GENERATION_UNKNOWN; 1996 if (!state->include_extent_data 1997 && list_empty(&block->ref_from_list)) { 1998 /* 1999 * disk block is overwritten with extent 2000 * data (not meta data) and we are configured 2001 * to not include extent data: take the 2002 * chance and free the block's memory 2003 */ 2004 btrfsic_block_hashtable_remove(block); 2005 list_del(&block->all_blocks_node); 2006 btrfsic_block_free(block); 2007 } 2008 } 2009 btrfsic_release_block_ctx(&block_ctx); 2010 } else { 2011 /* block has not been found in hash table */ 2012 u64 bytenr; 2013 2014 if (!is_metadata) { 2015 processed_len = state->datablock_size; 2016 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2017 pr_info("Written block (%s/%llu/?) !found in hash table, D.\n", 2018 dev_state->name, dev_bytenr); 2019 if (!state->include_extent_data) { 2020 /* ignore that written D block */ 2021 goto continue_loop; 2022 } 2023 2024 /* this is getting ugly for the 2025 * include_extent_data case... */ 2026 bytenr = 0; /* unknown */ 2027 } else { 2028 processed_len = state->metablock_size; 2029 bytenr = btrfs_stack_header_bytenr( 2030 (struct btrfs_header *) 2031 mapped_datav[0]); 2032 btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state, 2033 dev_bytenr); 2034 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2035 pr_info("Written block @%llu (%s/%llu/?) 
!found in hash table, M.\n", 2036 bytenr, dev_state->name, dev_bytenr); 2037 } 2038 2039 block_ctx.dev = dev_state; 2040 block_ctx.dev_bytenr = dev_bytenr; 2041 block_ctx.start = bytenr; 2042 block_ctx.len = processed_len; 2043 block_ctx.pagev = NULL; 2044 block_ctx.mem_to_free = NULL; 2045 block_ctx.datav = mapped_datav; 2046 2047 block = btrfsic_block_alloc(); 2048 if (NULL == block) { 2049 pr_info("btrfsic: error, kmalloc failed!\n"); 2050 btrfsic_release_block_ctx(&block_ctx); 2051 goto continue_loop; 2052 } 2053 block->dev_state = dev_state; 2054 block->dev_bytenr = dev_bytenr; 2055 block->logical_bytenr = bytenr; 2056 block->is_metadata = is_metadata; 2057 block->never_written = 0; 2058 block->iodone_w_error = 0; 2059 block->mirror_num = 0; /* unknown */ 2060 block->flush_gen = dev_state->last_flush_gen + 1; 2061 block->submit_bio_bh_rw = submit_bio_bh_rw; 2062 if (NULL != bio) { 2063 block->is_iodone = 0; 2064 BUG_ON(NULL == bio_is_patched); 2065 if (!*bio_is_patched) { 2066 block->orig_bio_bh_private = bio->bi_private; 2067 block->orig_bio_bh_end_io.bio = bio->bi_end_io; 2068 block->next_in_same_bio = NULL; 2069 bio->bi_private = block; 2070 bio->bi_end_io = btrfsic_bio_end_io; 2071 *bio_is_patched = 1; 2072 } else { 2073 struct btrfsic_block *chained_block = 2074 (struct btrfsic_block *) 2075 bio->bi_private; 2076 2077 BUG_ON(NULL == chained_block); 2078 block->orig_bio_bh_private = 2079 chained_block->orig_bio_bh_private; 2080 block->orig_bio_bh_end_io.bio = 2081 chained_block->orig_bio_bh_end_io.bio; 2082 block->next_in_same_bio = chained_block; 2083 bio->bi_private = block; 2084 } 2085 } else if (NULL != bh) { 2086 block->is_iodone = 0; 2087 block->orig_bio_bh_private = bh->b_private; 2088 block->orig_bio_bh_end_io.bh = bh->b_end_io; 2089 block->next_in_same_bio = NULL; 2090 bh->b_private = block; 2091 bh->b_end_io = btrfsic_bh_end_io; 2092 } else { 2093 block->is_iodone = 1; 2094 block->orig_bio_bh_private = NULL; 2095 block->orig_bio_bh_end_io.bio = NULL; 2096 block->next_in_same_bio = NULL; 2097 } 2098 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2099 pr_info("New written %c-block @%llu (%s/%llu/%d)\n", 2100 is_metadata ? 'M' : 'D', 2101 block->logical_bytenr, block->dev_state->name, 2102 block->dev_bytenr, block->mirror_num); 2103 list_add(&block->all_blocks_node, &state->all_blocks_list); 2104 btrfsic_block_hashtable_add(block, &state->block_hashtable); 2105 2106 if (is_metadata) { 2107 ret = btrfsic_process_metablock(state, block, 2108 &block_ctx, 0, 0); 2109 if (ret) 2110 pr_info("btrfsic: process_metablock(root @%llu) failed!\n", 2111 dev_bytenr); 2112 } 2113 btrfsic_release_block_ctx(&block_ctx); 2114 } 2115 2116 continue_loop: 2117 BUG_ON(!processed_len); 2118 dev_bytenr += processed_len; 2119 mapped_datav += processed_len >> PAGE_SHIFT; 2120 num_pages -= processed_len >> PAGE_SHIFT; 2121 goto again; 2122 } 2123 2124 static void btrfsic_bio_end_io(struct bio *bp) 2125 { 2126 struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private; 2127 int iodone_w_error; 2128 2129 /* mutex is not held! 
This is not safe if IO is not yet completed 2130 * on umount */ 2131 iodone_w_error = 0; 2132 if (bp->bi_error) 2133 iodone_w_error = 1; 2134 2135 BUG_ON(NULL == block); 2136 bp->bi_private = block->orig_bio_bh_private; 2137 bp->bi_end_io = block->orig_bio_bh_end_io.bio; 2138 2139 do { 2140 struct btrfsic_block *next_block; 2141 struct btrfsic_dev_state *const dev_state = block->dev_state; 2142 2143 if ((dev_state->state->print_mask & 2144 BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) 2145 pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n", 2146 bp->bi_error, 2147 btrfsic_get_block_type(dev_state->state, block), 2148 block->logical_bytenr, dev_state->name, 2149 block->dev_bytenr, block->mirror_num); 2150 next_block = block->next_in_same_bio; 2151 block->iodone_w_error = iodone_w_error; 2152 if (block->submit_bio_bh_rw & REQ_PREFLUSH) { 2153 dev_state->last_flush_gen++; 2154 if ((dev_state->state->print_mask & 2155 BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) 2156 pr_info("bio_end_io() new %s flush_gen=%llu\n", 2157 dev_state->name, 2158 dev_state->last_flush_gen); 2159 } 2160 if (block->submit_bio_bh_rw & REQ_FUA) 2161 block->flush_gen = 0; /* FUA completed means block is 2162 * on disk */ 2163 block->is_iodone = 1; /* for FLUSH, this releases the block */ 2164 block = next_block; 2165 } while (NULL != block); 2166 2167 bp->bi_end_io(bp); 2168 } 2169 2170 static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate) 2171 { 2172 struct btrfsic_block *block = (struct btrfsic_block *)bh->b_private; 2173 int iodone_w_error = !uptodate; 2174 struct btrfsic_dev_state *dev_state; 2175 2176 BUG_ON(NULL == block); 2177 dev_state = block->dev_state; 2178 if ((dev_state->state->print_mask & BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) 2179 pr_info("bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n", 2180 iodone_w_error, 2181 btrfsic_get_block_type(dev_state->state, block), 2182 block->logical_bytenr, block->dev_state->name, 2183 block->dev_bytenr, block->mirror_num); 2184 2185 block->iodone_w_error = iodone_w_error; 2186 if (block->submit_bio_bh_rw & REQ_PREFLUSH) { 2187 dev_state->last_flush_gen++; 2188 if ((dev_state->state->print_mask & 2189 BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) 2190 pr_info("bh_end_io() new %s flush_gen=%llu\n", 2191 dev_state->name, dev_state->last_flush_gen); 2192 } 2193 if (block->submit_bio_bh_rw & REQ_FUA) 2194 block->flush_gen = 0; /* FUA completed means block is on disk */ 2195 2196 bh->b_private = block->orig_bio_bh_private; 2197 bh->b_end_io = block->orig_bio_bh_end_io.bh; 2198 block->is_iodone = 1; /* for FLUSH, this releases the block */ 2199 bh->b_end_io(bh, uptodate); 2200 } 2201 2202 static int btrfsic_process_written_superblock( 2203 struct btrfsic_state *state, 2204 struct btrfsic_block *const superblock, 2205 struct btrfs_super_block *const super_hdr) 2206 { 2207 struct btrfs_fs_info *fs_info = state->fs_info; 2208 int pass; 2209 2210 superblock->generation = btrfs_super_generation(super_hdr); 2211 if (!(superblock->generation > state->max_superblock_generation || 2212 0 == state->max_superblock_generation)) { 2213 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 2214 pr_info("btrfsic: superblock @%llu (%s/%llu/%d) with old gen %llu <= %llu\n", 2215 superblock->logical_bytenr, 2216 superblock->dev_state->name, 2217 superblock->dev_bytenr, superblock->mirror_num, 2218 btrfs_super_generation(super_hdr), 2219 state->max_superblock_generation); 2220 } else { 2221 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 2222 pr_info("btrfsic: got new superblock @%llu
(%s/%llu/%d) with new gen %llu > %llu\n", 2223 superblock->logical_bytenr, 2224 superblock->dev_state->name, 2225 superblock->dev_bytenr, superblock->mirror_num, 2226 btrfs_super_generation(super_hdr), 2227 state->max_superblock_generation); 2228 2229 state->max_superblock_generation = 2230 btrfs_super_generation(super_hdr); 2231 state->latest_superblock = superblock; 2232 } 2233 2234 for (pass = 0; pass < 3; pass++) { 2235 int ret; 2236 u64 next_bytenr; 2237 struct btrfsic_block *next_block; 2238 struct btrfsic_block_data_ctx tmp_next_block_ctx; 2239 struct btrfsic_block_link *l; 2240 int num_copies; 2241 int mirror_num; 2242 const char *additional_string = NULL; 2243 struct btrfs_disk_key tmp_disk_key = {0}; 2244 2245 btrfs_set_disk_key_type(&tmp_disk_key, 2246 BTRFS_ROOT_ITEM_KEY); 2247 btrfs_set_disk_key_objectid(&tmp_disk_key, 0); 2248 2249 switch (pass) { 2250 case 0: 2251 btrfs_set_disk_key_objectid(&tmp_disk_key, 2252 BTRFS_ROOT_TREE_OBJECTID); 2253 additional_string = "root "; 2254 next_bytenr = btrfs_super_root(super_hdr); 2255 if (state->print_mask & 2256 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 2257 pr_info("root@%llu\n", next_bytenr); 2258 break; 2259 case 1: 2260 btrfs_set_disk_key_objectid(&tmp_disk_key, 2261 BTRFS_CHUNK_TREE_OBJECTID); 2262 additional_string = "chunk "; 2263 next_bytenr = btrfs_super_chunk_root(super_hdr); 2264 if (state->print_mask & 2265 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 2266 pr_info("chunk@%llu\n", next_bytenr); 2267 break; 2268 case 2: 2269 btrfs_set_disk_key_objectid(&tmp_disk_key, 2270 BTRFS_TREE_LOG_OBJECTID); 2271 additional_string = "log "; 2272 next_bytenr = btrfs_super_log_root(super_hdr); 2273 if (0 == next_bytenr) 2274 continue; 2275 if (state->print_mask & 2276 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 2277 pr_info("log@%llu\n", next_bytenr); 2278 break; 2279 } 2280 2281 num_copies = btrfs_num_copies(fs_info, next_bytenr, 2282 BTRFS_SUPER_INFO_SIZE); 2283 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 2284 pr_info("num_copies(log_bytenr=%llu) = %d\n", 2285 next_bytenr, num_copies); 2286 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2287 int was_created; 2288 2289 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2290 pr_info("btrfsic_process_written_superblock(mirror_num=%d)\n", mirror_num); 2291 ret = btrfsic_map_block(state, next_bytenr, 2292 BTRFS_SUPER_INFO_SIZE, 2293 &tmp_next_block_ctx, 2294 mirror_num); 2295 if (ret) { 2296 pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n", 2297 next_bytenr, mirror_num); 2298 return -1; 2299 } 2300 2301 next_block = btrfsic_block_lookup_or_add( 2302 state, 2303 &tmp_next_block_ctx, 2304 additional_string, 2305 1, 0, 1, 2306 mirror_num, 2307 &was_created); 2308 if (NULL == next_block) { 2309 pr_info("btrfsic: error, kmalloc failed!\n"); 2310 btrfsic_release_block_ctx(&tmp_next_block_ctx); 2311 return -1; 2312 } 2313 2314 next_block->disk_key = tmp_disk_key; 2315 if (was_created) 2316 next_block->generation = 2317 BTRFSIC_GENERATION_UNKNOWN; 2318 l = btrfsic_block_link_lookup_or_add( 2319 state, 2320 &tmp_next_block_ctx, 2321 next_block, 2322 superblock, 2323 BTRFSIC_GENERATION_UNKNOWN); 2324 btrfsic_release_block_ctx(&tmp_next_block_ctx); 2325 if (NULL == l) 2326 return -1; 2327 } 2328 } 2329 2330 if (WARN_ON(-1 == btrfsic_check_all_ref_blocks(state, superblock, 0))) 2331 btrfsic_dump_tree(state); 2332 2333 return 0; 2334 } 2335 2336 static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state, 2337 struct btrfsic_block
*const block, 2338 int recursion_level) 2339 { 2340 const struct btrfsic_block_link *l; 2341 int ret = 0; 2342 2343 if (recursion_level >= 3 + BTRFS_MAX_LEVEL) { 2344 /* 2345 * Note that this situation can happen and does not 2346 * indicate an error in regular cases. It happens 2347 * when disk blocks are freed and later reused. 2348 * The check-integrity module is not aware of any 2349 * block free operations; it just recognizes block 2350 * write operations. Therefore it keeps the linkage 2351 * information for a block until that block is 2352 * rewritten. This can temporarily cause incorrect 2353 * and even circular linkage information. This 2354 * causes no harm unless such blocks are referenced 2355 * by the most recent super block. 2356 */ 2357 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2358 pr_info("btrfsic: abort cyclic linkage (case 1).\n"); 2359 2360 return ret; 2361 } 2362 2363 /* 2364 * This algorithm is recursive because the amount of used stack 2365 * space is very small and the max recursion depth is limited. 2366 */ 2367 list_for_each_entry(l, &block->ref_to_list, node_ref_to) { 2368 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2369 pr_info("rl=%d, %c @%llu (%s/%llu/%d) %u* refers to %c @%llu (%s/%llu/%d)\n", 2370 recursion_level, 2371 btrfsic_get_block_type(state, block), 2372 block->logical_bytenr, block->dev_state->name, 2373 block->dev_bytenr, block->mirror_num, 2374 l->ref_cnt, 2375 btrfsic_get_block_type(state, l->block_ref_to), 2376 l->block_ref_to->logical_bytenr, 2377 l->block_ref_to->dev_state->name, 2378 l->block_ref_to->dev_bytenr, 2379 l->block_ref_to->mirror_num); 2380 if (l->block_ref_to->never_written) { 2381 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is never written!\n", 2382 btrfsic_get_block_type(state, l->block_ref_to), 2383 l->block_ref_to->logical_bytenr, 2384 l->block_ref_to->dev_state->name, 2385 l->block_ref_to->dev_bytenr, 2386 l->block_ref_to->mirror_num); 2387 ret = -1; 2388 } else if (!l->block_ref_to->is_iodone) { 2389 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is not yet iodone!\n", 2390 btrfsic_get_block_type(state, l->block_ref_to), 2391 l->block_ref_to->logical_bytenr, 2392 l->block_ref_to->dev_state->name, 2393 l->block_ref_to->dev_bytenr, 2394 l->block_ref_to->mirror_num); 2395 ret = -1; 2396 } else if (l->block_ref_to->iodone_w_error) { 2397 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which has write error!\n", 2398 btrfsic_get_block_type(state, l->block_ref_to), 2399 l->block_ref_to->logical_bytenr, 2400 l->block_ref_to->dev_state->name, 2401 l->block_ref_to->dev_bytenr, 2402 l->block_ref_to->mirror_num); 2403 ret = -1; 2404 } else if (l->parent_generation != 2405 l->block_ref_to->generation && 2406 BTRFSIC_GENERATION_UNKNOWN != 2407 l->parent_generation && 2408 BTRFSIC_GENERATION_UNKNOWN != 2409 l->block_ref_to->generation) { 2410 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) with generation %llu != parent generation %llu!\n", 2411 btrfsic_get_block_type(state, l->block_ref_to), 2412 l->block_ref_to->logical_bytenr, 2413 l->block_ref_to->dev_state->name, 2414 l->block_ref_to->dev_bytenr, 2415 l->block_ref_to->mirror_num, 2416 l->block_ref_to->generation, 2417 l->parent_generation); 2418 ret = -1; 2419 } else if (l->block_ref_to->flush_gen > 2420 l->block_ref_to->dev_state->last_flush_gen) { 2421 pr_info("btrfs: attempt to write
superblock which references block %c @%llu (%s/%llu/%d) which is not flushed out of disk's write cache (block flush_gen=%llu, dev->flush_gen=%llu)!\n", 2422 btrfsic_get_block_type(state, l->block_ref_to), 2423 l->block_ref_to->logical_bytenr, 2424 l->block_ref_to->dev_state->name, 2425 l->block_ref_to->dev_bytenr, 2426 l->block_ref_to->mirror_num, l->block_ref_to->flush_gen, 2427 l->block_ref_to->dev_state->last_flush_gen); 2428 ret = -1; 2429 } else if (-1 == btrfsic_check_all_ref_blocks(state, 2430 l->block_ref_to, 2431 recursion_level + 2432 1)) { 2433 ret = -1; 2434 } 2435 } 2436 2437 return ret; 2438 } 2439 2440 static int btrfsic_is_block_ref_by_superblock( 2441 const struct btrfsic_state *state, 2442 const struct btrfsic_block *block, 2443 int recursion_level) 2444 { 2445 const struct btrfsic_block_link *l; 2446 2447 if (recursion_level >= 3 + BTRFS_MAX_LEVEL) { 2448 /* refer to comment at "abort cyclic linkage (case 1)" */ 2449 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2450 pr_info("btrfsic: abort cyclic linkage (case 2).\n"); 2451 2452 return 0; 2453 } 2454 2455 /* 2456 * This algorithm is recursive because the amount of used stack space 2457 * is very small and the max recursion depth is limited. 2458 */ 2459 list_for_each_entry(l, &block->ref_from_list, node_ref_from) { 2460 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2461 pr_info("rl=%d, %c @%llu (%s/%llu/%d) is ref %u* from %c @%llu (%s/%llu/%d)\n", 2462 recursion_level, 2463 btrfsic_get_block_type(state, block), 2464 block->logical_bytenr, block->dev_state->name, 2465 block->dev_bytenr, block->mirror_num, 2466 l->ref_cnt, 2467 btrfsic_get_block_type(state, l->block_ref_from), 2468 l->block_ref_from->logical_bytenr, 2469 l->block_ref_from->dev_state->name, 2470 l->block_ref_from->dev_bytenr, 2471 l->block_ref_from->mirror_num); 2472 if (l->block_ref_from->is_superblock && 2473 state->latest_superblock->dev_bytenr == 2474 l->block_ref_from->dev_bytenr && 2475 state->latest_superblock->dev_state->bdev == 2476 l->block_ref_from->dev_state->bdev) 2477 return 1; 2478 else if (btrfsic_is_block_ref_by_superblock(state, 2479 l->block_ref_from, 2480 recursion_level + 2481 1)) 2482 return 1; 2483 } 2484 2485 return 0; 2486 } 2487 2488 static void btrfsic_print_add_link(const struct btrfsic_state *state, 2489 const struct btrfsic_block_link *l) 2490 { 2491 pr_info("Add %u* link from %c @%llu (%s/%llu/%d) to %c @%llu (%s/%llu/%d).\n", 2492 l->ref_cnt, 2493 btrfsic_get_block_type(state, l->block_ref_from), 2494 l->block_ref_from->logical_bytenr, 2495 l->block_ref_from->dev_state->name, 2496 l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num, 2497 btrfsic_get_block_type(state, l->block_ref_to), 2498 l->block_ref_to->logical_bytenr, 2499 l->block_ref_to->dev_state->name, l->block_ref_to->dev_bytenr, 2500 l->block_ref_to->mirror_num); 2501 } 2502 2503 static void btrfsic_print_rem_link(const struct btrfsic_state *state, 2504 const struct btrfsic_block_link *l) 2505 { 2506 pr_info("Rem %u* link from %c @%llu (%s/%llu/%d) to %c @%llu (%s/%llu/%d).\n", 2507 l->ref_cnt, 2508 btrfsic_get_block_type(state, l->block_ref_from), 2509 l->block_ref_from->logical_bytenr, 2510 l->block_ref_from->dev_state->name, 2511 l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num, 2512 btrfsic_get_block_type(state, l->block_ref_to), 2513 l->block_ref_to->logical_bytenr, 2514 l->block_ref_to->dev_state->name, l->block_ref_to->dev_bytenr, 2515 l->block_ref_to->mirror_num); 2516 } 2517 2518 static char btrfsic_get_block_type(const struct
btrfsic_state *state, 2519 const struct btrfsic_block *block) 2520 { 2521 if (block->is_superblock && 2522 state->latest_superblock->dev_bytenr == block->dev_bytenr && 2523 state->latest_superblock->dev_state->bdev == block->dev_state->bdev) 2524 return 'S'; 2525 else if (block->is_superblock) 2526 return 's'; 2527 else if (block->is_metadata) 2528 return 'M'; 2529 else 2530 return 'D'; 2531 } 2532 2533 static void btrfsic_dump_tree(const struct btrfsic_state *state) 2534 { 2535 btrfsic_dump_tree_sub(state, state->latest_superblock, 0); 2536 } 2537 2538 static void btrfsic_dump_tree_sub(const struct btrfsic_state *state, 2539 const struct btrfsic_block *block, 2540 int indent_level) 2541 { 2542 const struct btrfsic_block_link *l; 2543 int indent_add; 2544 static char buf[80]; 2545 int cursor_position; 2546 2547 /* 2548 * It would be better to fill an on-stack buffer with a complete line 2549 * and dump it at once when it is time to print a newline character. 2550 */ 2551 2552 /* 2553 * This algorithm is recursive because the amount of used stack space 2554 * is very small and the max recursion depth is limited. 2555 */ 2556 indent_add = sprintf(buf, "%c-%llu(%s/%llu/%u)", 2557 btrfsic_get_block_type(state, block), 2558 block->logical_bytenr, block->dev_state->name, 2559 block->dev_bytenr, block->mirror_num); 2560 if (indent_level + indent_add > BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) { 2561 printk("[...]\n"); 2562 return; 2563 } 2564 printk("%s", buf); 2565 indent_level += indent_add; 2566 if (list_empty(&block->ref_to_list)) { 2567 printk("\n"); 2568 return; 2569 } 2570 if (block->mirror_num > 1 && 2571 !(state->print_mask & BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS)) { 2572 printk(" [...]\n"); 2573 return; 2574 } 2575 2576 cursor_position = indent_level; 2577 list_for_each_entry(l, &block->ref_to_list, node_ref_to) { 2578 while (cursor_position < indent_level) { 2579 printk(" "); 2580 cursor_position++; 2581 } 2582 if (l->ref_cnt > 1) 2583 indent_add = sprintf(buf, " %d*--> ", l->ref_cnt); 2584 else 2585 indent_add = sprintf(buf, " --> "); 2586 if (indent_level + indent_add > 2587 BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) { 2588 printk("[...]\n"); 2589 cursor_position = 0; 2590 continue; 2591 } 2592 2593 printk("%s", buf); 2594 2595 btrfsic_dump_tree_sub(state, l->block_ref_to, 2596 indent_level + indent_add); 2597 cursor_position = 0; 2598 } 2599 } 2600 2601 static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add( 2602 struct btrfsic_state *state, 2603 struct btrfsic_block_data_ctx *next_block_ctx, 2604 struct btrfsic_block *next_block, 2605 struct btrfsic_block *from_block, 2606 u64 parent_generation) 2607 { 2608 struct btrfsic_block_link *l; 2609 2610 l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev, 2611 next_block_ctx->dev_bytenr, 2612 from_block->dev_state->bdev, 2613 from_block->dev_bytenr, 2614 &state->block_link_hashtable); 2615 if (NULL == l) { 2616 l = btrfsic_block_link_alloc(); 2617 if (NULL == l) { 2618 pr_info("btrfsic: error, kmalloc failed!\n"); 2619 return NULL; 2620 } 2621 2622 l->block_ref_to = next_block; 2623 l->block_ref_from = from_block; 2624 l->ref_cnt = 1; 2625 l->parent_generation = parent_generation; 2626 2627 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2628 btrfsic_print_add_link(state, l); 2629 2630 list_add(&l->node_ref_to, &from_block->ref_to_list); 2631 list_add(&l->node_ref_from, &next_block->ref_from_list); 2632 2633 btrfsic_block_link_hashtable_add(l, 2634 &state->block_link_hashtable); 2635 } else { 2636 l->ref_cnt++; 2637 l->parent_generation =
parent_generation; 2638 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2639 btrfsic_print_add_link(state, l); 2640 } 2641 2642 return l; 2643 } 2644 2645 static struct btrfsic_block *btrfsic_block_lookup_or_add( 2646 struct btrfsic_state *state, 2647 struct btrfsic_block_data_ctx *block_ctx, 2648 const char *additional_string, 2649 int is_metadata, 2650 int is_iodone, 2651 int never_written, 2652 int mirror_num, 2653 int *was_created) 2654 { 2655 struct btrfsic_block *block; 2656 2657 block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev, 2658 block_ctx->dev_bytenr, 2659 &state->block_hashtable); 2660 if (NULL == block) { 2661 struct btrfsic_dev_state *dev_state; 2662 2663 block = btrfsic_block_alloc(); 2664 if (NULL == block) { 2665 pr_info("btrfsic: error, kmalloc failed!\n"); 2666 return NULL; 2667 } 2668 dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev); 2669 if (NULL == dev_state) { 2670 pr_info("btrfsic: error, lookup dev_state failed!\n"); 2671 btrfsic_block_free(block); 2672 return NULL; 2673 } 2674 block->dev_state = dev_state; 2675 block->dev_bytenr = block_ctx->dev_bytenr; 2676 block->logical_bytenr = block_ctx->start; 2677 block->is_metadata = is_metadata; 2678 block->is_iodone = is_iodone; 2679 block->never_written = never_written; 2680 block->mirror_num = mirror_num; 2681 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2682 pr_info("New %s%c-block @%llu (%s/%llu/%d)\n", 2683 additional_string, 2684 btrfsic_get_block_type(state, block), 2685 block->logical_bytenr, dev_state->name, 2686 block->dev_bytenr, mirror_num); 2687 list_add(&block->all_blocks_node, &state->all_blocks_list); 2688 btrfsic_block_hashtable_add(block, &state->block_hashtable); 2689 if (NULL != was_created) 2690 *was_created = 1; 2691 } else { 2692 if (NULL != was_created) 2693 *was_created = 0; 2694 } 2695 2696 return block; 2697 } 2698 2699 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state, 2700 u64 bytenr, 2701 struct btrfsic_dev_state *dev_state, 2702 u64 dev_bytenr) 2703 { 2704 struct btrfs_fs_info *fs_info = state->fs_info; 2705 struct btrfsic_block_data_ctx block_ctx; 2706 int num_copies; 2707 int mirror_num; 2708 int match = 0; 2709 int ret; 2710 2711 num_copies = btrfs_num_copies(fs_info, bytenr, state->metablock_size); 2712 2713 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2714 ret = btrfsic_map_block(state, bytenr, state->metablock_size, 2715 &block_ctx, mirror_num); 2716 if (ret) { 2717 pr_info("btrfsic: btrfsic_map_block(logical @%llu, mirror %d) failed!\n", 2718 bytenr, mirror_num); 2719 continue; 2720 } 2721 2722 if (dev_state->bdev == block_ctx.dev->bdev && 2723 dev_bytenr == block_ctx.dev_bytenr) { 2724 match++; 2725 btrfsic_release_block_ctx(&block_ctx); 2726 break; 2727 } 2728 btrfsic_release_block_ctx(&block_ctx); 2729 } 2730 2731 if (WARN_ON(!match)) { 2732 pr_info("btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio, buffer->log_bytenr=%llu, submit_bio(bdev=%s, phys_bytenr=%llu)!\n", 2733 bytenr, dev_state->name, dev_bytenr); 2734 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2735 ret = btrfsic_map_block(state, bytenr, 2736 state->metablock_size, 2737 &block_ctx, mirror_num); 2738 if (ret) 2739 continue; 2740 2741 pr_info("Read logical bytenr @%llu maps to (%s/%llu/%d)\n", 2742 bytenr, block_ctx.dev->name, 2743 block_ctx.dev_bytenr, mirror_num); 2744 } 2745 } 2746 } 2747 2748 static struct btrfsic_dev_state *btrfsic_dev_state_lookup( 2749 struct 
block_device *bdev) 2750 { 2751 return btrfsic_dev_state_hashtable_lookup(bdev, 2752 &btrfsic_dev_state_hashtable); 2753 } 2754 2755 int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh) 2756 { 2757 struct btrfsic_dev_state *dev_state; 2758 2759 if (!btrfsic_is_initialized) 2760 return submit_bh(op, op_flags, bh); 2761 2762 mutex_lock(&btrfsic_mutex); 2763 /* since btrfsic_submit_bh() might also be called before 2764 * btrfsic_mount(), this might return NULL */ 2765 dev_state = btrfsic_dev_state_lookup(bh->b_bdev); 2766 2767 /* Only called to write the superblock (incl. FLUSH/FUA) */ 2768 if (NULL != dev_state && 2769 (op == REQ_OP_WRITE) && bh->b_size > 0) { 2770 u64 dev_bytenr; 2771 2772 dev_bytenr = 4096 * bh->b_blocknr; 2773 if (dev_state->state->print_mask & 2774 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2775 pr_info("submit_bh(op=0x%x,0x%x, blocknr=%llu (bytenr %llu), size=%zu, data=%p, bdev=%p)\n", 2776 op, op_flags, (unsigned long long)bh->b_blocknr, 2777 dev_bytenr, bh->b_size, bh->b_data, bh->b_bdev); 2778 btrfsic_process_written_block(dev_state, dev_bytenr, 2779 &bh->b_data, 1, NULL, 2780 NULL, bh, op_flags); 2781 } else if (NULL != dev_state && (op_flags & REQ_PREFLUSH)) { 2782 if (dev_state->state->print_mask & 2783 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2784 pr_info("submit_bh(op=0x%x,0x%x FLUSH, bdev=%p)\n", 2785 op, op_flags, bh->b_bdev); 2786 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { 2787 if ((dev_state->state->print_mask & 2788 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | 2789 BTRFSIC_PRINT_MASK_VERBOSE))) 2790 pr_info("btrfsic_submit_bh(%s) with FLUSH but dummy block already in use (ignored)!\n", 2791 dev_state->name); 2792 } else { 2793 struct btrfsic_block *const block = 2794 &dev_state->dummy_block_for_bio_bh_flush; 2795 2796 block->is_iodone = 0; 2797 block->never_written = 0; 2798 block->iodone_w_error = 0; 2799 block->flush_gen = dev_state->last_flush_gen + 1; 2800 block->submit_bio_bh_rw = op_flags; 2801 block->orig_bio_bh_private = bh->b_private; 2802 block->orig_bio_bh_end_io.bh = bh->b_end_io; 2803 block->next_in_same_bio = NULL; 2804 bh->b_private = block; 2805 bh->b_end_io = btrfsic_bh_end_io; 2806 } 2807 } 2808 mutex_unlock(&btrfsic_mutex); 2809 return submit_bh(op, op_flags, bh); 2810 } 2811 2812 static void __btrfsic_submit_bio(struct bio *bio) 2813 { 2814 struct btrfsic_dev_state *dev_state; 2815 2816 if (!btrfsic_is_initialized) 2817 return; 2818 2819 mutex_lock(&btrfsic_mutex); 2820 /* since btrfsic_submit_bio() is also called before 2821 * btrfsic_mount(), this might return NULL */ 2822 dev_state = btrfsic_dev_state_lookup(bio->bi_bdev); 2823 if (NULL != dev_state && 2824 (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) { 2825 unsigned int i; 2826 u64 dev_bytenr; 2827 u64 cur_bytenr; 2828 struct bio_vec *bvec; 2829 int bio_is_patched; 2830 char **mapped_datav; 2831 2832 dev_bytenr = 512 * bio->bi_iter.bi_sector; 2833 bio_is_patched = 0; 2834 if (dev_state->state->print_mask & 2835 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2836 pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", 2837 bio_op(bio), bio->bi_opf, bio->bi_vcnt, 2838 (unsigned long long)bio->bi_iter.bi_sector, 2839 dev_bytenr, bio->bi_bdev); 2840 2841 mapped_datav = kmalloc_array(bio->bi_vcnt, 2842 sizeof(*mapped_datav), GFP_NOFS); 2843 if (!mapped_datav) 2844 goto leave; 2845 cur_bytenr = dev_bytenr; 2846 2847 bio_for_each_segment_all(bvec, bio, i) { 2848 BUG_ON(bvec->bv_len != PAGE_SIZE); 2849 mapped_datav[i] = kmap(bvec->bv_page); 2850 2851 if 
(dev_state->state->print_mask & 2852 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE) 2853 pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n", 2854 i, cur_bytenr, bvec->bv_len, bvec->bv_offset); 2855 cur_bytenr += bvec->bv_len; 2856 } 2857 btrfsic_process_written_block(dev_state, dev_bytenr, 2858 mapped_datav, bio->bi_vcnt, 2859 bio, &bio_is_patched, 2860 NULL, bio->bi_opf); 2861 bio_for_each_segment_all(bvec, bio, i) 2862 kunmap(bvec->bv_page); 2863 kfree(mapped_datav); 2864 } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) { 2865 if (dev_state->state->print_mask & 2866 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2867 pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n", 2868 bio_op(bio), bio->bi_opf, bio->bi_bdev); 2869 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { 2870 if ((dev_state->state->print_mask & 2871 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | 2872 BTRFSIC_PRINT_MASK_VERBOSE))) 2873 pr_info("btrfsic_submit_bio(%s) with FLUSH but dummy block already in use (ignored)!\n", 2874 dev_state->name); 2875 } else { 2876 struct btrfsic_block *const block = 2877 &dev_state->dummy_block_for_bio_bh_flush; 2878 2879 block->is_iodone = 0; 2880 block->never_written = 0; 2881 block->iodone_w_error = 0; 2882 block->flush_gen = dev_state->last_flush_gen + 1; 2883 block->submit_bio_bh_rw = bio->bi_opf; 2884 block->orig_bio_bh_private = bio->bi_private; 2885 block->orig_bio_bh_end_io.bio = bio->bi_end_io; 2886 block->next_in_same_bio = NULL; 2887 bio->bi_private = block; 2888 bio->bi_end_io = btrfsic_bio_end_io; 2889 } 2890 } 2891 leave: 2892 mutex_unlock(&btrfsic_mutex); 2893 } 2894 2895 void btrfsic_submit_bio(struct bio *bio) 2896 { 2897 __btrfsic_submit_bio(bio); 2898 submit_bio(bio); 2899 } 2900 2901 int btrfsic_submit_bio_wait(struct bio *bio) 2902 { 2903 __btrfsic_submit_bio(bio); 2904 return submit_bio_wait(bio); 2905 } 2906 2907 int btrfsic_mount(struct btrfs_fs_info *fs_info, 2908 struct btrfs_fs_devices *fs_devices, 2909 int including_extent_data, u32 print_mask) 2910 { 2911 int ret; 2912 struct btrfsic_state *state; 2913 struct list_head *dev_head = &fs_devices->devices; 2914 struct btrfs_device *device; 2915 2916 if (fs_info->nodesize & ((u64)PAGE_SIZE - 1)) { 2917 pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n", 2918 fs_info->nodesize, PAGE_SIZE); 2919 return -1; 2920 } 2921 if (fs_info->sectorsize & ((u64)PAGE_SIZE - 1)) { 2922 pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n", 2923 fs_info->sectorsize, PAGE_SIZE); 2924 return -1; 2925 } 2926 state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 2927 if (!state) { 2928 state = vzalloc(sizeof(*state)); 2929 if (!state) { 2930 pr_info("btrfs check-integrity: vzalloc() failed!\n"); 2931 return -1; 2932 } 2933 } 2934 2935 if (!btrfsic_is_initialized) { 2936 mutex_init(&btrfsic_mutex); 2937 btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable); 2938 btrfsic_is_initialized = 1; 2939 } 2940 mutex_lock(&btrfsic_mutex); 2941 state->fs_info = fs_info; 2942 state->print_mask = print_mask; 2943 state->include_extent_data = including_extent_data; 2944 state->csum_size = 0; 2945 state->metablock_size = fs_info->nodesize; 2946 state->datablock_size = fs_info->sectorsize; 2947 INIT_LIST_HEAD(&state->all_blocks_list); 2948 btrfsic_block_hashtable_init(&state->block_hashtable); 2949 btrfsic_block_link_hashtable_init(&state->block_link_hashtable); 2950 state->max_superblock_generation = 0; 2951 state->latest_superblock = NULL; 2952 2953 
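/* Register one btrfsic_dev_state per writable device so that intercepted writes (btrfsic_submit_bio()/btrfsic_submit_bh()) can be mapped from the block_device back to the checker state. */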
list_for_each_entry(device, dev_head, dev_list) { 2954 struct btrfsic_dev_state *ds; 2955 const char *p; 2956 2957 if (!device->bdev || !device->name) 2958 continue; 2959 2960 ds = btrfsic_dev_state_alloc(); 2961 if (NULL == ds) { 2962 pr_info("btrfs check-integrity: kmalloc() failed!\n"); 2963 mutex_unlock(&btrfsic_mutex); 2964 return -1; 2965 } 2966 ds->bdev = device->bdev; 2967 ds->state = state; 2968 bdevname(ds->bdev, ds->name); 2969 ds->name[BDEVNAME_SIZE - 1] = '\0'; 2970 p = kbasename(ds->name); 2971 strlcpy(ds->name, p, sizeof(ds->name)); 2972 btrfsic_dev_state_hashtable_add(ds, 2973 &btrfsic_dev_state_hashtable); 2974 } 2975 2976 ret = btrfsic_process_superblock(state, fs_devices); 2977 if (0 != ret) { 2978 mutex_unlock(&btrfsic_mutex); 2979 btrfsic_unmount(fs_devices); 2980 return ret; 2981 } 2982 2983 if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_DATABASE) 2984 btrfsic_dump_database(state); 2985 if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_TREE) 2986 btrfsic_dump_tree(state); 2987 2988 mutex_unlock(&btrfsic_mutex); 2989 return 0; 2990 } 2991 2992 void btrfsic_unmount(struct btrfs_fs_devices *fs_devices) 2993 { 2994 struct btrfsic_block *b_all, *tmp_all; 2995 struct btrfsic_state *state; 2996 struct list_head *dev_head = &fs_devices->devices; 2997 struct btrfs_device *device; 2998 2999 if (!btrfsic_is_initialized) 3000 return; 3001 3002 mutex_lock(&btrfsic_mutex); 3003 3004 state = NULL; 3005 list_for_each_entry(device, dev_head, dev_list) { 3006 struct btrfsic_dev_state *ds; 3007 3008 if (!device->bdev || !device->name) 3009 continue; 3010 3011 ds = btrfsic_dev_state_hashtable_lookup( 3012 device->bdev, 3013 &btrfsic_dev_state_hashtable); 3014 if (NULL != ds) { 3015 state = ds->state; 3016 btrfsic_dev_state_hashtable_remove(ds); 3017 btrfsic_dev_state_free(ds); 3018 } 3019 } 3020 3021 if (NULL == state) { 3022 pr_info("btrfsic: error, cannot find state information on umount!\n"); 3023 mutex_unlock(&btrfsic_mutex); 3024 return; 3025 } 3026 3027 /* 3028 * Don't care about keeping the lists' state up to date, 3029 * just free all memory that was allocated dynamically. 3030 * Free the blocks and the block_links. 3031 */ 3032 list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list, 3033 all_blocks_node) { 3034 struct btrfsic_block_link *l, *tmp; 3035 3036 list_for_each_entry_safe(l, tmp, &b_all->ref_to_list, 3037 node_ref_to) { 3038 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 3039 btrfsic_print_rem_link(state, l); 3040 3041 l->ref_cnt--; 3042 if (0 == l->ref_cnt) 3043 btrfsic_block_link_free(l); 3044 } 3045 3046 if (b_all->is_iodone || b_all->never_written) 3047 btrfsic_block_free(b_all); 3048 else 3049 pr_info("btrfs: attempt to free %c-block @%llu (%s/%llu/%d) on umount which is not yet iodone!\n", 3050 btrfsic_get_block_type(state, b_all), 3051 b_all->logical_bytenr, b_all->dev_state->name, 3052 b_all->dev_bytenr, b_all->mirror_num); 3053 } 3054 3055 mutex_unlock(&btrfsic_mutex); 3056 3057 kvfree(state); 3058 } 3059
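/*
 * Editor's note, a usage sketch that is not part of the original file:
 * the wrappers btrfsic_submit_bh(), btrfsic_submit_bio() and
 * btrfsic_submit_bio_wait() above are drop-in replacements for the
 * generic block layer calls, so a btrfs call site that wants its writes
 * examined submits through them, e.g.
 *
 *	btrfsic_submit_bio(bio);             instead of submit_bio(bio);
 *	ret = btrfsic_submit_bio_wait(bio);  instead of submit_bio_wait(bio);
 *	ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
 *
 * When BTRFS_FS_CHECK_INTEGRITY is not configured, it is assumed that
 * check-integrity.h maps these names directly to submit_bio(),
 * submit_bio_wait() and submit_bh(), so the wrappers add no overhead in
 * normal builds.
 */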