/*
 * Copyright (C) STRATO AG 2011. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

/*
 * This module can be used to catch cases when the btrfs kernel
 * code executes write requests to the disk that bring the file
 * system into an inconsistent state. In such a state, a power-loss
 * or kernel panic event would cause the data on disk to be
 * lost or at least damaged.
 *
 * Code is added that examines all block write requests during
 * runtime (including writes of the super block). Three rules
 * are verified and an error is printed on violation of the
 * rules:
 * 1. It is not allowed to write a disk block which is
 *    currently referenced by the super block (either directly
 *    or indirectly).
 * 2. When a super block is written, it is verified that all
 *    referenced (directly or indirectly) blocks fulfill the
 *    following requirements:
 *    2a. All referenced blocks have either been present when
 *        the file system was mounted (i.e., they have been
 *        referenced by the super block), or they have been
 *        written since then, the write completion callback
 *        was called without a write error being indicated,
 *        and a FLUSH request to the device where these blocks
 *        are located was received and completed.
 *    2b. All referenced blocks need to have a generation
 *        number which is equal to the parent's number.
 *
 * One issue that was found using this module was that the log
 * tree on disk became temporarily corrupted because disk blocks
 * that had been in use for the log tree had been freed and
 * reused too early, while being referenced by the written super
 * block.
 *
 * The search term in the kernel log that can be used to filter
 * on the existence of detected integrity issues is
 * "btrfs: attempt".
 *
 * The integrity check is enabled via mount options. These
 * mount options are only supported if the integrity check
 * tool is compiled by defining BTRFS_FS_CHECK_INTEGRITY.
 *
 * Example #1, apply integrity checks to all metadata:
 * mount /dev/sdb1 /mnt -o check_int
 *
 * Example #2, apply integrity checks to all metadata and
 * to data extents:
 * mount /dev/sdb1 /mnt -o check_int_data
 *
 * Example #3, apply integrity checks to all metadata and dump
 * the tree that the super block references to kernel messages
 * each time after a super block was written:
 * mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263
 *
 * If the integrity check tool is included and activated in
 * the mount options, plenty of kernel memory is used, and
 * plenty of additional CPU cycles are spent. Enabling this
 * functionality is not intended for normal use. In most
 * cases, unless you are a btrfs developer who needs to verify
 * the integrity of (super)-block write requests, do not
 * enable the config option BTRFS_FS_CHECK_INTEGRITY to
 * include and compile the integrity check tool.
 *
 * Expect millions of lines of information in the kernel log with an
 * enabled check_int_print_mask. Therefore set LOG_BUF_SHIFT in the
 * kernel config to at least 26 (which is 64MB). Usually the value is
 * limited to 21 (which is 2MB) in init/Kconfig. The file needs to be
 * changed like this before LOG_BUF_SHIFT can be set to a high value:
 * config LOG_BUF_SHIFT
 *	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
 *	range 12 30
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "extent_io.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "check-integrity.h"
#include "rcu-string.h"

#define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
#define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
#define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100
#define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051
#define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807
#define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530
#define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300
#define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6)	/* in characters,
							 * excluding " [...]" */
#define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)

/*
 * The definition of the bitmask fields for the print_mask.
 * They are specified with the mount option check_int_print_mask.
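 * For example, the mask value 263 used in example #3 above is 0x107,
 * i.e. BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE |
 * BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION |
 * BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE |
 * BTRFSIC_PRINT_MASK_INITIAL_TREE.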
 */
#define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE			0x00000001
#define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION	0x00000002
#define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE			0x00000004
#define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE		0x00000008
#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH			0x00000010
#define BTRFSIC_PRINT_MASK_END_IO_BIO_BH			0x00000020
#define BTRFSIC_PRINT_MASK_VERBOSE				0x00000040
#define BTRFSIC_PRINT_MASK_VERY_VERBOSE			0x00000080
#define BTRFSIC_PRINT_MASK_INITIAL_TREE			0x00000100
#define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES			0x00000200
#define BTRFSIC_PRINT_MASK_INITIAL_DATABASE			0x00000400
#define BTRFSIC_PRINT_MASK_NUM_COPIES				0x00000800
#define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS		0x00001000
#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE		0x00002000

struct btrfsic_dev_state;
struct btrfsic_state;

struct btrfsic_block {
	u32 magic_num;		/* only used for debug purposes */
	unsigned int is_metadata:1;	/* if it is meta-data, not data-data */
	unsigned int is_superblock:1;	/* if it is one of the superblocks */
	unsigned int is_iodone:1;	/* if is done by lower subsystem */
	unsigned int iodone_w_error:1;	/* error was indicated to endio */
	unsigned int never_written:1;	/* block was added because it was
					 * referenced, not because it was
					 * written */
	unsigned int mirror_num;	/* large enough to hold
					 * BTRFS_SUPER_MIRROR_MAX */
	struct btrfsic_dev_state *dev_state;
	u64 dev_bytenr;		/* key, physical byte num on disk */
	u64 logical_bytenr;	/* logical byte num on disk */
	u64 generation;
	struct btrfs_disk_key disk_key;	/* extra info to print in case of
					 * issues, will not always be correct */
	struct list_head collision_resolving_node;	/* list node */
	struct list_head all_blocks_node;	/* list node */

	/* the following two lists contain block_link items */
	struct list_head ref_to_list;	/* list */
	struct list_head ref_from_list;	/* list */
	struct btrfsic_block *next_in_same_bio;
	void *orig_bio_bh_private;
	union {
		bio_end_io_t *bio;
		bh_end_io_t *bh;
	} orig_bio_bh_end_io;
	int submit_bio_bh_rw;
	u64 flush_gen; /* only valid if !never_written */
};

/*
 * Elements of this type are allocated dynamically and required because
 * each block object can refer to and can be referenced from multiple
 * blocks. The key used to look them up in the hashtable is the
 * dev_bytenr of the block referred to plus the dev_bytenr of the block
 * referring from. The fact that they are searchable via a hashtable and
 * that a ref_cnt is maintained is not required for the btrfs integrity
 * check algorithm itself, it is only used to make the output more
 * beautiful in case that an error is detected (an error is defined
 * as a write operation to a block while that block is still referenced).
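 * For example, after processing a node block N that points to a leaf L,
 * one link element N->L exists with ref_cnt 1, findable by hashing the
 * dev_bytenr values of both blocks.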
 */
struct btrfsic_block_link {
	u32 magic_num;		/* only used for debug purposes */
	u32 ref_cnt;
	struct list_head node_ref_to;	/* list node */
	struct list_head node_ref_from;	/* list node */
	struct list_head collision_resolving_node;	/* list node */
	struct btrfsic_block *block_ref_to;
	struct btrfsic_block *block_ref_from;
	u64 parent_generation;
};

struct btrfsic_dev_state {
	u32 magic_num;		/* only used for debug purposes */
	struct block_device *bdev;
	struct btrfsic_state *state;
	struct list_head collision_resolving_node;	/* list node */
	struct btrfsic_block dummy_block_for_bio_bh_flush;
	u64 last_flush_gen;
	char name[BDEVNAME_SIZE];
};

struct btrfsic_block_hashtable {
	struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE];
};

struct btrfsic_block_link_hashtable {
	struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE];
};

struct btrfsic_dev_state_hashtable {
	struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE];
};

struct btrfsic_block_data_ctx {
	u64 start;		/* virtual bytenr */
	u64 dev_bytenr;		/* physical bytenr on device */
	u32 len;
	struct btrfsic_dev_state *dev;
	char **datav;
	struct page **pagev;
	void *mem_to_free;
};

/*
 * This structure is used to implement recursion without occupying
 * any stack space, refer to btrfsic_process_metablock().
 */
struct btrfsic_stack_frame {
	u32 magic;
	u32 nr;
	int error;
	int i;
	int limit_nesting;
	int num_copies;
	int mirror_num;
	struct btrfsic_block *block;
	struct btrfsic_block_data_ctx *block_ctx;
	struct btrfsic_block *next_block;
	struct btrfsic_block_data_ctx next_block_ctx;
	struct btrfs_header *hdr;
	struct btrfsic_stack_frame *prev;
};
/* Some state per mounted filesystem */
struct btrfsic_state {
	u32 print_mask;
	int include_extent_data;
	int csum_size;
	struct list_head all_blocks_list;
	struct btrfsic_block_hashtable block_hashtable;
	struct btrfsic_block_link_hashtable block_link_hashtable;
	struct btrfs_root *root;
	u64 max_superblock_generation;
	struct btrfsic_block *latest_superblock;
	u32 metablock_size;
	u32 datablock_size;
};

static void btrfsic_block_init(struct btrfsic_block *b);
static struct btrfsic_block *btrfsic_block_alloc(void);
static void btrfsic_block_free(struct btrfsic_block *b);
static void btrfsic_block_link_init(struct btrfsic_block_link *n);
static struct btrfsic_block_link *btrfsic_block_link_alloc(void);
static void btrfsic_block_link_free(struct btrfsic_block_link *n);
static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds);
static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void);
static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds);
static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h);
static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
					struct btrfsic_block_hashtable *h);
static void btrfsic_block_hashtable_remove(struct btrfsic_block *b);
static struct btrfsic_block *btrfsic_block_hashtable_lookup(
		struct block_device *bdev,
		u64 dev_bytenr,
		struct btrfsic_block_hashtable *h);
static void btrfsic_block_link_hashtable_init(
		struct btrfsic_block_link_hashtable *h);
static void btrfsic_block_link_hashtable_add(
		struct btrfsic_block_link *l,
		struct btrfsic_block_link_hashtable *h);
static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l);
static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
		struct block_device *bdev_ref_to,
		u64 dev_bytenr_ref_to,
		struct block_device *bdev_ref_from,
		u64 dev_bytenr_ref_from,
		struct btrfsic_block_link_hashtable *h);
static void btrfsic_dev_state_hashtable_init(
		struct btrfsic_dev_state_hashtable *h);
static void btrfsic_dev_state_hashtable_add(
		struct btrfsic_dev_state *ds,
		struct btrfsic_dev_state_hashtable *h);
static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds);
static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
		struct block_device *bdev,
		struct btrfsic_dev_state_hashtable *h);
static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void);
static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf);
static int btrfsic_process_superblock(struct btrfsic_state *state,
				      struct btrfs_fs_devices *fs_devices);
static int btrfsic_process_metablock(struct btrfsic_state *state,
				     struct btrfsic_block *block,
				     struct btrfsic_block_data_ctx *block_ctx,
				     int limit_nesting, int force_iodone_flag);
static void btrfsic_read_from_block_data(
		struct btrfsic_block_data_ctx *block_ctx,
		void *dst, u32 offset, size_t len);
static int btrfsic_create_link_to_next_block(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u64 next_bytenr,
		int limit_nesting,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block **next_blockp,
		int force_iodone_flag,
		int *num_copiesp, int *mirror_nump,
		struct btrfs_disk_key *disk_key,
		u64 parent_generation);
static int btrfsic_handle_extent_data(struct btrfsic_state *state,
				      struct btrfsic_block *block,
				      struct btrfsic_block_data_ctx *block_ctx,
				      u32 item_offset, int force_iodone_flag);
static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
			     struct btrfsic_block_data_ctx *block_ctx_out,
			     int mirror_num);
static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
				  u32 len, struct block_device *bdev,
				  struct btrfsic_block_data_ctx *block_ctx_out);
static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
static int btrfsic_read_block(struct btrfsic_state *state,
			      struct btrfsic_block_data_ctx *block_ctx);
static void btrfsic_dump_database(struct btrfsic_state *state);
static int btrfsic_test_for_metadata(struct btrfsic_state *state,
				     char **datav, unsigned int num_pages);
static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
					  u64 dev_bytenr, char **mapped_datav,
					  unsigned int num_pages,
					  struct bio *bio, int *bio_is_patched,
					  struct buffer_head *bh,
					  int submit_bio_bh_rw);
static int btrfsic_process_written_superblock(
		struct btrfsic_state *state,
		struct btrfsic_block *const block,
		struct btrfs_super_block *const super_hdr);
static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status);
static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate);
static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state,
					      const struct btrfsic_block *block,
					      int recursion_level);
static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
					struct btrfsic_block *const block,
					int recursion_level);
static void btrfsic_print_add_link(const struct btrfsic_state *state,
				   const struct btrfsic_block_link *l);
static void btrfsic_print_rem_link(const struct btrfsic_state *state,
				   const struct btrfsic_block_link *l);
static char btrfsic_get_block_type(const struct btrfsic_state *state,
				   const struct btrfsic_block *block);
static void btrfsic_dump_tree(const struct btrfsic_state *state);
static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
				  const struct btrfsic_block *block,
				  int indent_level);
static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
		struct btrfsic_state *state,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block *next_block,
		struct btrfsic_block *from_block,
		u64 parent_generation);
static struct btrfsic_block *btrfsic_block_lookup_or_add(
		struct btrfsic_state *state,
		struct btrfsic_block_data_ctx *block_ctx,
		const char *additional_string,
		int is_metadata,
		int is_iodone,
		int never_written,
		int mirror_num,
		int *was_created);
static int btrfsic_process_superblock_dev_mirror(
		struct btrfsic_state *state,
		struct btrfsic_dev_state *dev_state,
		struct btrfs_device *device,
		int superblock_mirror_num,
		struct btrfsic_dev_state **selected_dev_state,
		struct btrfs_super_block *selected_super);
static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
		struct block_device *bdev);
static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
					   u64 bytenr,
					   struct btrfsic_dev_state *dev_state,
					   u64 dev_bytenr);

static struct mutex btrfsic_mutex;
static int btrfsic_is_initialized;
static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable;

static void btrfsic_block_init(struct btrfsic_block *b)
{
	b->magic_num = BTRFSIC_BLOCK_MAGIC_NUMBER;
	b->dev_state = NULL;
	b->dev_bytenr = 0;
	b->logical_bytenr = 0;
	b->generation = BTRFSIC_GENERATION_UNKNOWN;
	b->disk_key.objectid = 0;
	b->disk_key.type = 0;
	b->disk_key.offset = 0;
	b->is_metadata = 0;
	b->is_superblock = 0;
	b->is_iodone = 0;
	b->iodone_w_error = 0;
	b->never_written = 0;
	b->mirror_num = 0;
	b->next_in_same_bio = NULL;
	b->orig_bio_bh_private = NULL;
	b->orig_bio_bh_end_io.bio = NULL;
	INIT_LIST_HEAD(&b->collision_resolving_node);
	INIT_LIST_HEAD(&b->all_blocks_node);
	INIT_LIST_HEAD(&b->ref_to_list);
	INIT_LIST_HEAD(&b->ref_from_list);
	b->submit_bio_bh_rw = 0;
	b->flush_gen = 0;
}

static struct btrfsic_block *btrfsic_block_alloc(void)
{
	struct btrfsic_block *b;

	b = kzalloc(sizeof(*b), GFP_NOFS);
	if (NULL != b)
		btrfsic_block_init(b);

	return b;
}

static void btrfsic_block_free(struct btrfsic_block *b)
{
	BUG_ON(!(NULL == b || BTRFSIC_BLOCK_MAGIC_NUMBER == b->magic_num));
	kfree(b);
}

static void btrfsic_block_link_init(struct btrfsic_block_link *l)
{
	l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER;
	l->ref_cnt = 1;
	INIT_LIST_HEAD(&l->node_ref_to);
	INIT_LIST_HEAD(&l->node_ref_from);
	INIT_LIST_HEAD(&l->collision_resolving_node);
	l->block_ref_to = NULL;
	l->block_ref_from = NULL;
}

static struct btrfsic_block_link *btrfsic_block_link_alloc(void)
{
	struct btrfsic_block_link *l;

	l = kzalloc(sizeof(*l), GFP_NOFS);
	if (NULL != l)
		btrfsic_block_link_init(l);

	return l;
}
static void btrfsic_block_link_free(struct btrfsic_block_link *l)
{
	BUG_ON(!(NULL == l || BTRFSIC_BLOCK_LINK_MAGIC_NUMBER == l->magic_num));
	kfree(l);
}

static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds)
{
	ds->magic_num = BTRFSIC_DEV2STATE_MAGIC_NUMBER;
	ds->bdev = NULL;
	ds->state = NULL;
	ds->name[0] = '\0';
	INIT_LIST_HEAD(&ds->collision_resolving_node);
	ds->last_flush_gen = 0;
	btrfsic_block_init(&ds->dummy_block_for_bio_bh_flush);
	ds->dummy_block_for_bio_bh_flush.is_iodone = 1;
	ds->dummy_block_for_bio_bh_flush.dev_state = ds;
}

static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void)
{
	struct btrfsic_dev_state *ds;

	ds = kzalloc(sizeof(*ds), GFP_NOFS);
	if (NULL != ds)
		btrfsic_dev_state_init(ds);

	return ds;
}

static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds)
{
	BUG_ON(!(NULL == ds ||
		 BTRFSIC_DEV2STATE_MAGIC_NUMBER == ds->magic_num));
	kfree(ds);
}

static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h)
{
	int i;

	for (i = 0; i < BTRFSIC_BLOCK_HASHTABLE_SIZE; i++)
		INIT_LIST_HEAD(h->table + i);
}

static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
					struct btrfsic_block_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)(b->dev_bytenr >> 16)) ^
	     ((unsigned int)((uintptr_t)b->dev_state->bdev))) &
	     (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);

	list_add(&b->collision_resolving_node, h->table + hashval);
}

static void btrfsic_block_hashtable_remove(struct btrfsic_block *b)
{
	list_del(&b->collision_resolving_node);
}

static struct btrfsic_block *btrfsic_block_hashtable_lookup(
		struct block_device *bdev,
		u64 dev_bytenr,
		struct btrfsic_block_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)(dev_bytenr >> 16)) ^
	     ((unsigned int)((uintptr_t)bdev))) &
	     (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
	struct list_head *elem;

	list_for_each(elem, h->table + hashval) {
		struct btrfsic_block *const b =
		    list_entry(elem, struct btrfsic_block,
			       collision_resolving_node);

		if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
			return b;
	}

	return NULL;
}

static void btrfsic_block_link_hashtable_init(
		struct btrfsic_block_link_hashtable *h)
{
	int i;

	for (i = 0; i < BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE; i++)
		INIT_LIST_HEAD(h->table + i);
}

static void btrfsic_block_link_hashtable_add(
		struct btrfsic_block_link *l,
		struct btrfsic_block_link_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^
	     ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^
	     ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^
	     ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev)))
	     & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);

	BUG_ON(NULL == l->block_ref_to);
	BUG_ON(NULL == l->block_ref_from);
	list_add(&l->collision_resolving_node, h->table + hashval);
}

static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l)
{
	list_del(&l->collision_resolving_node);
}

static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
		struct block_device *bdev_ref_to,
		u64 dev_bytenr_ref_to,
		struct block_device *bdev_ref_from,
		u64 dev_bytenr_ref_from,
		struct btrfsic_block_link_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)(dev_bytenr_ref_to >> 16)) ^
	     ((unsigned int)(dev_bytenr_ref_from >> 16)) ^
	     ((unsigned int)((uintptr_t)bdev_ref_to)) ^
	     ((unsigned int)((uintptr_t)bdev_ref_from))) &
	     (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
	struct list_head *elem;

	list_for_each(elem, h->table + hashval) {
		struct btrfsic_block_link *const l =
		    list_entry(elem, struct btrfsic_block_link,
			       collision_resolving_node);

		BUG_ON(NULL == l->block_ref_to);
		BUG_ON(NULL == l->block_ref_from);
		if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
		    l->block_ref_to->dev_bytenr == dev_bytenr_ref_to &&
		    l->block_ref_from->dev_state->bdev == bdev_ref_from &&
		    l->block_ref_from->dev_bytenr == dev_bytenr_ref_from)
			return l;
	}

	return NULL;
}

static void btrfsic_dev_state_hashtable_init(
		struct btrfsic_dev_state_hashtable *h)
{
	int i;

	for (i = 0; i < BTRFSIC_DEV2STATE_HASHTABLE_SIZE; i++)
		INIT_LIST_HEAD(h->table + i);
}

static void btrfsic_dev_state_hashtable_add(
		struct btrfsic_dev_state *ds,
		struct btrfsic_dev_state_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)((uintptr_t)ds->bdev)) &
	     (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));

	list_add(&ds->collision_resolving_node, h->table + hashval);
}

static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds)
{
	list_del(&ds->collision_resolving_node);
}

static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
		struct block_device *bdev,
		struct btrfsic_dev_state_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)((uintptr_t)bdev)) &
	     (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
	struct list_head *elem;

	list_for_each(elem, h->table + hashval) {
		struct btrfsic_dev_state *const ds =
		    list_entry(elem, struct btrfsic_dev_state,
			       collision_resolving_node);

		if (ds->bdev == bdev)
			return ds;
	}

	return NULL;
}

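/*
 * Read the superblock mirrors of all devices, select the superblock
 * with the highest generation as the authoritative one, and then walk
 * the root, chunk and (if present) log trees that it references.
 */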
static int btrfsic_process_superblock(struct btrfsic_state *state,
				      struct btrfs_fs_devices *fs_devices)
{
	int ret = 0;
	struct btrfs_super_block *selected_super;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfsic_dev_state *selected_dev_state = NULL;
	int pass;

	BUG_ON(NULL == state);
	selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
	if (NULL == selected_super) {
		printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
		return -1;
	}

	list_for_each_entry(device, dev_head, dev_list) {
		int i;
		struct btrfsic_dev_state *dev_state;

		if (!device->bdev || !device->name)
			continue;

		dev_state = btrfsic_dev_state_lookup(device->bdev);
		BUG_ON(NULL == dev_state);
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			ret = btrfsic_process_superblock_dev_mirror(
					state, dev_state, device, i,
					&selected_dev_state, selected_super);
			if (0 != ret && 0 == i) {
				kfree(selected_super);
				return ret;
			}
		}
	}

	if (NULL == state->latest_superblock) {
		printk(KERN_INFO "btrfsic: no superblock found!\n");
		kfree(selected_super);
		return -1;
	}

	state->csum_size = btrfs_super_csum_size(selected_super);

	for (pass = 0; pass < 3; pass++) {
		int num_copies;
		int mirror_num;
		u64 next_bytenr;

		switch (pass) {
		case 0:
			next_bytenr = btrfs_super_root(selected_super);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "root@%llu\n", next_bytenr);
			break;
		case 1:
			next_bytenr = btrfs_super_chunk_root(selected_super);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "chunk@%llu\n", next_bytenr);
			break;
		case 2:
			next_bytenr = btrfs_super_log_root(selected_super);
			if (0 == next_bytenr)
				continue;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "log@%llu\n", next_bytenr);
			break;
		}

		num_copies =
		    btrfs_num_copies(state->root->fs_info,
				     next_bytenr, state->metablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       next_bytenr, num_copies);

		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block *next_block;
			struct btrfsic_block_data_ctx tmp_next_block_ctx;
			struct btrfsic_block_link *l;

			ret = btrfsic_map_block(state, next_bytenr,
						state->metablock_size,
						&tmp_next_block_ctx,
						mirror_num);
			if (ret) {
				printk(KERN_INFO "btrfsic: btrfsic_map_block(root @%llu, mirror %d) failed!\n",
				       next_bytenr, mirror_num);
				kfree(selected_super);
				return -1;
			}

			next_block = btrfsic_block_hashtable_lookup(
					tmp_next_block_ctx.dev->bdev,
					tmp_next_block_ctx.dev_bytenr,
					&state->block_hashtable);
			BUG_ON(NULL == next_block);

			l = btrfsic_block_link_hashtable_lookup(
					tmp_next_block_ctx.dev->bdev,
					tmp_next_block_ctx.dev_bytenr,
					state->latest_superblock->dev_state->bdev,
					state->latest_superblock->dev_bytenr,
					&state->block_link_hashtable);
			BUG_ON(NULL == l);

			ret = btrfsic_read_block(state, &tmp_next_block_ctx);
			if (ret < (int)PAGE_CACHE_SIZE) {
				printk(KERN_INFO
				       "btrfsic: read @logical %llu failed!\n",
				       tmp_next_block_ctx.start);
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				kfree(selected_super);
				return -1;
			}

			ret = btrfsic_process_metablock(state,
							next_block,
							&tmp_next_block_ctx,
							BTRFS_MAX_LEVEL + 3, 1);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
		}
	}

	kfree(selected_super);
	return ret;
}

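/*
 * Process one superblock mirror of one device: read it, sanity-check
 * magic, UUID and block sizes, register it in the block hashtable, and
 * record the initial links from it to the root, chunk and log trees.
 */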
static int btrfsic_process_superblock_dev_mirror(
		struct btrfsic_state *state,
		struct btrfsic_dev_state *dev_state,
		struct btrfs_device *device,
		int superblock_mirror_num,
		struct btrfsic_dev_state **selected_dev_state,
		struct btrfs_super_block *selected_super)
{
	struct btrfs_super_block *super_tmp;
	u64 dev_bytenr;
	struct buffer_head *bh;
	struct btrfsic_block *superblock_tmp;
	int pass;
	struct block_device *const superblock_bdev = device->bdev;

	/* super block bytenr is always the unmapped device bytenr */
	dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
	if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->commit_total_bytes)
		return -1;
	bh = __bread(superblock_bdev, dev_bytenr / 4096,
		     BTRFS_SUPER_INFO_SIZE);
	if (NULL == bh)
		return -1;
	super_tmp = (struct btrfs_super_block *)
	    (bh->b_data + (dev_bytenr & 4095));

	if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
	    btrfs_super_magic(super_tmp) != BTRFS_MAGIC ||
	    memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
	    btrfs_super_nodesize(super_tmp) != state->metablock_size ||
	    btrfs_super_sectorsize(super_tmp) != state->datablock_size) {
		brelse(bh);
		return 0;
	}

	superblock_tmp =
	    btrfsic_block_hashtable_lookup(superblock_bdev,
					   dev_bytenr,
					   &state->block_hashtable);
	if (NULL == superblock_tmp) {
		superblock_tmp = btrfsic_block_alloc();
		if (NULL == superblock_tmp) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			brelse(bh);
			return -1;
		}
		/* for superblock, only the dev_bytenr makes sense */
		superblock_tmp->dev_bytenr = dev_bytenr;
		superblock_tmp->dev_state = dev_state;
		superblock_tmp->logical_bytenr = dev_bytenr;
		superblock_tmp->generation = btrfs_super_generation(super_tmp);
		superblock_tmp->is_metadata = 1;
		superblock_tmp->is_superblock = 1;
		superblock_tmp->is_iodone = 1;
		superblock_tmp->never_written = 0;
		superblock_tmp->mirror_num = 1 + superblock_mirror_num;
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			printk_in_rcu(KERN_INFO "New initial S-block (bdev %p, %s) @%llu (%s/%llu/%d)\n",
				      superblock_bdev,
				      rcu_str_deref(device->name), dev_bytenr,
				      dev_state->name, dev_bytenr,
				      superblock_mirror_num);
		list_add(&superblock_tmp->all_blocks_node,
			 &state->all_blocks_list);
		btrfsic_block_hashtable_add(superblock_tmp,
					    &state->block_hashtable);
	}

	/* select the one with the highest generation field */
	if (btrfs_super_generation(super_tmp) >
	    state->max_superblock_generation ||
	    0 == state->max_superblock_generation) {
		memcpy(selected_super, super_tmp, sizeof(*selected_super));
		*selected_dev_state = dev_state;
		state->max_superblock_generation =
		    btrfs_super_generation(super_tmp);
		state->latest_superblock = superblock_tmp;
	}

	for (pass = 0; pass < 3; pass++) {
		u64 next_bytenr;
		int num_copies;
		int mirror_num;
		const char *additional_string = NULL;
		struct btrfs_disk_key tmp_disk_key;

		tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_disk_key.offset = 0;
		switch (pass) {
		case 0:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_ROOT_TREE_OBJECTID);
			additional_string = "initial root ";
			next_bytenr = btrfs_super_root(super_tmp);
			break;
		case 1:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_CHUNK_TREE_OBJECTID);
			additional_string = "initial chunk ";
			next_bytenr = btrfs_super_chunk_root(super_tmp);
			break;
		case 2:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_TREE_LOG_OBJECTID);
			additional_string = "initial log ";
			next_bytenr = btrfs_super_log_root(super_tmp);
			if (0 == next_bytenr)
				continue;
			break;
		}

		num_copies =
		    btrfs_num_copies(state->root->fs_info,
				     next_bytenr, state->metablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block *next_block;
			struct btrfsic_block_data_ctx tmp_next_block_ctx;
			struct btrfsic_block_link *l;

			if (btrfsic_map_block(state, next_bytenr,
					      state->metablock_size,
					      &tmp_next_block_ctx,
					      mirror_num)) {
				printk(KERN_INFO "btrfsic: btrfsic_map_block(bytenr @%llu, mirror %d) failed!\n",
				       next_bytenr, mirror_num);
				brelse(bh);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state, &tmp_next_block_ctx,
					additional_string, 1, 1, 0,
					mirror_num, NULL);
			if (NULL == next_block) {
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				brelse(bh);
				return -1;
			}

			next_block->disk_key = tmp_disk_key;
			next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
			l = btrfsic_block_link_lookup_or_add(
					state, &tmp_next_block_ctx,
					next_block, superblock_tmp,
					BTRFSIC_GENERATION_UNKNOWN);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
			if (NULL == l) {
				brelse(bh);
				return -1;
			}
		}
	}
	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES)
		btrfsic_dump_tree_sub(state, superblock_tmp, 0);

	brelse(bh);
	return 0;
}

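/*
 * The following two helpers manage the stack frames used by
 * btrfsic_process_metablock() to descend a tree without recursing:
 * one heap-allocated frame per tree level, chained via ->prev, so the
 * walk consumes no kernel stack beyond the caller's initial frame.
 */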
static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void)
{
	struct btrfsic_stack_frame *sf;

	sf = kzalloc(sizeof(*sf), GFP_NOFS);
	if (NULL == sf)
		printk(KERN_INFO "btrfsic: alloc memory failed!\n");
	else
		sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
	return sf;
}

static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf)
{
	BUG_ON(!(NULL == sf ||
		 BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER == sf->magic));
	kfree(sf);
}

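/*
 * btrfsic_process_metablock() below implements the recursive descent
 * through a metadata tree iteratively. Stripped of error handling and
 * mirror iteration, its control flow follows this simplified sketch
 * (illustrative only, not part of the module):
 *
 *	sf = &initial_stack_frame;	// describes the first block
 * continue_with_new_stack_frame:
 *	for each item/key pointer i in sf->block {
 *		if a child tree block must be descended into {
 *			next_stack = btrfsic_stack_frame_alloc();
 *			next_stack->prev = sf;	// push
 *			sf = next_stack;
 *			goto continue_with_new_stack_frame;
 *		}
 *	}
 * one_stack_frame_backwards:
 *	if (sf->prev) {
 *		sf = sf->prev;			// pop, free old frame
 *		goto continue_with_new_stack_frame;
 *	}
 */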
static int btrfsic_process_metablock(
		struct btrfsic_state *state,
		struct btrfsic_block *const first_block,
		struct btrfsic_block_data_ctx *const first_block_ctx,
		int first_limit_nesting, int force_iodone_flag)
{
	struct btrfsic_stack_frame initial_stack_frame = { 0 };
	struct btrfsic_stack_frame *sf;
	struct btrfsic_stack_frame *next_stack;
	struct btrfs_header *const first_hdr =
		(struct btrfs_header *)first_block_ctx->datav[0];

	BUG_ON(!first_hdr);
	sf = &initial_stack_frame;
	sf->error = 0;
	sf->i = -1;
	sf->limit_nesting = first_limit_nesting;
	sf->block = first_block;
	sf->block_ctx = first_block_ctx;
	sf->next_block = NULL;
	sf->hdr = first_hdr;
	sf->prev = NULL;

continue_with_new_stack_frame:
	sf->block->generation = le64_to_cpu(sf->hdr->generation);
	if (0 == sf->hdr->level) {
		struct btrfs_leaf *const leafhdr =
		    (struct btrfs_leaf *)sf->hdr;

		if (-1 == sf->i) {
			sf->nr = btrfs_stack_header_nritems(&leafhdr->header);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "leaf %llu items %d generation %llu owner %llu\n",
				       sf->block_ctx->start, sf->nr,
				       btrfs_stack_header_generation(
					       &leafhdr->header),
				       btrfs_stack_header_owner(
					       &leafhdr->header));
		}

continue_with_current_leaf_stack_frame:
		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
			sf->i++;
			sf->num_copies = 0;
		}

		if (sf->i < sf->nr) {
			struct btrfs_item disk_item;
			u32 disk_item_offset =
				(uintptr_t)(leafhdr->items + sf->i) -
				(uintptr_t)leafhdr;
			struct btrfs_disk_key *disk_key;
			u8 type;
			u32 item_offset;
			u32 item_size;

			if (disk_item_offset + sizeof(struct btrfs_item) >
			    sf->block_ctx->len) {
leaf_item_out_of_bounds_error:
				printk(KERN_INFO
				       "btrfsic: leaf item out of bounds at logical %llu, dev %s\n",
				       sf->block_ctx->start,
				       sf->block_ctx->dev->name);
				goto one_stack_frame_backwards;
			}
			btrfsic_read_from_block_data(sf->block_ctx,
						     &disk_item,
						     disk_item_offset,
						     sizeof(struct btrfs_item));
			item_offset = btrfs_stack_item_offset(&disk_item);
			item_size = btrfs_stack_item_size(&disk_item);
			disk_key = &disk_item.key;
			type = btrfs_disk_key_type(disk_key);

			if (BTRFS_ROOT_ITEM_KEY == type) {
				struct btrfs_root_item root_item;
				u32 root_item_offset;
				u64 next_bytenr;

				root_item_offset = item_offset +
					offsetof(struct btrfs_leaf, items);
				if (root_item_offset + item_size >
				    sf->block_ctx->len)
					goto leaf_item_out_of_bounds_error;
				btrfsic_read_from_block_data(
					sf->block_ctx, &root_item,
					root_item_offset,
					item_size);
				next_bytenr = btrfs_root_bytenr(&root_item);

				sf->error =
				    btrfsic_create_link_to_next_block(
						state,
						sf->block,
						sf->block_ctx,
						next_bytenr,
						sf->limit_nesting,
						&sf->next_block_ctx,
						&sf->next_block,
						force_iodone_flag,
						&sf->num_copies,
						&sf->mirror_num,
						disk_key,
						btrfs_root_generation(
						&root_item));
				if (sf->error)
					goto one_stack_frame_backwards;

				if (NULL != sf->next_block) {
					struct btrfs_header *const next_hdr =
					    (struct btrfs_header *)
					    sf->next_block_ctx.datav[0];

					next_stack =
					    btrfsic_stack_frame_alloc();
					if (NULL == next_stack) {
						sf->error = -1;
						btrfsic_release_block_ctx(
							&sf->next_block_ctx);
						goto one_stack_frame_backwards;
					}

					next_stack->i = -1;
					next_stack->block = sf->next_block;
					next_stack->block_ctx =
					    &sf->next_block_ctx;
					next_stack->next_block = NULL;
					next_stack->hdr = next_hdr;
					next_stack->limit_nesting =
					    sf->limit_nesting - 1;
					next_stack->prev = sf;
					sf = next_stack;
					goto continue_with_new_stack_frame;
				}
			} else if (BTRFS_EXTENT_DATA_KEY == type &&
				   state->include_extent_data) {
				sf->error = btrfsic_handle_extent_data(
						state,
						sf->block,
						sf->block_ctx,
						item_offset,
						force_iodone_flag);
				if (sf->error)
					goto one_stack_frame_backwards;
			}

			goto continue_with_current_leaf_stack_frame;
		}
	} else {
		struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr;

		if (-1 == sf->i) {
			sf->nr = btrfs_stack_header_nritems(&nodehdr->header);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "node %llu level %d items %d generation %llu owner %llu\n",
				       sf->block_ctx->start,
				       nodehdr->header.level, sf->nr,
				       btrfs_stack_header_generation(
					       &nodehdr->header),
				       btrfs_stack_header_owner(
					       &nodehdr->header));
		}

continue_with_current_node_stack_frame:
		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
			sf->i++;
			sf->num_copies = 0;
		}

		if (sf->i < sf->nr) {
			struct btrfs_key_ptr key_ptr;
			u32 key_ptr_offset;
			u64 next_bytenr;

			key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) -
				(uintptr_t)nodehdr;
			if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
			    sf->block_ctx->len) {
				printk(KERN_INFO
				       "btrfsic: node item out of bounds at logical %llu, dev %s\n",
				       sf->block_ctx->start,
				       sf->block_ctx->dev->name);
				goto one_stack_frame_backwards;
			}
			btrfsic_read_from_block_data(
				sf->block_ctx, &key_ptr, key_ptr_offset,
				sizeof(struct btrfs_key_ptr));
			next_bytenr = btrfs_stack_key_blockptr(&key_ptr);

			sf->error = btrfsic_create_link_to_next_block(
					state,
					sf->block,
					sf->block_ctx,
					next_bytenr,
					sf->limit_nesting,
					&sf->next_block_ctx,
					&sf->next_block,
					force_iodone_flag,
					&sf->num_copies,
					&sf->mirror_num,
					&key_ptr.key,
					btrfs_stack_key_generation(&key_ptr));
			if (sf->error)
				goto one_stack_frame_backwards;

			if (NULL != sf->next_block) {
				struct btrfs_header *const next_hdr =
				    (struct btrfs_header *)
				    sf->next_block_ctx.datav[0];

				next_stack = btrfsic_stack_frame_alloc();
				if (NULL == next_stack) {
					sf->error = -1;
					goto one_stack_frame_backwards;
				}

				next_stack->i = -1;
				next_stack->block = sf->next_block;
				next_stack->block_ctx = &sf->next_block_ctx;
				next_stack->next_block = NULL;
				next_stack->hdr = next_hdr;
				next_stack->limit_nesting =
				    sf->limit_nesting - 1;
				next_stack->prev = sf;
				sf = next_stack;
				goto continue_with_new_stack_frame;
			}

			goto continue_with_current_node_stack_frame;
		}
	}

one_stack_frame_backwards:
	if (NULL != sf->prev) {
		struct btrfsic_stack_frame *const prev = sf->prev;

		/* the one for the initial block is freed in the caller */
		btrfsic_release_block_ctx(sf->block_ctx);

		if (sf->error) {
			prev->error = sf->error;
			btrfsic_stack_frame_free(sf);
			sf = prev;
			goto one_stack_frame_backwards;
		}

		btrfsic_stack_frame_free(sf);
		sf = prev;
		goto continue_with_new_stack_frame;
	} else {
		BUG_ON(&initial_stack_frame != sf);
	}

	return sf->error;
}

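/*
 * Copy an arbitrary (offset, len) byte range out of the kmapped page
 * vector of a block context, crossing page boundaries as needed;
 * roughly this module's counterpart to read_extent_buffer().
 */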
static void btrfsic_read_from_block_data(
	struct btrfsic_block_data_ctx *block_ctx,
	void *dstv, u32 offset, size_t len)
{
	size_t cur;
	size_t offset_in_page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(offset + len > block_ctx->len);
	offset_in_page = (start_offset + offset) & (PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page));
		BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_CACHE_SIZE));
		kaddr = block_ctx->datav[i];
		memcpy(dst, kaddr + offset_in_page, cur);

		dst += cur;
		len -= cur;
		offset_in_page = 0;
		i++;
	}
}

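/*
 * Create (or look up) the link from 'block' to the tree block at
 * 'next_bytenr'. Note the calling convention: *num_copiesp must be 0
 * on the first call for a given byte number; the function then fills
 * in the number of mirrors and advances *mirror_nump on every call,
 * so the caller keeps calling until *mirror_nump > *num_copiesp.
 * *next_blockp is only set when the child block's contents were read
 * and a descent into it (a new stack frame) is required.
 */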
static int btrfsic_create_link_to_next_block(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u64 next_bytenr,
		int limit_nesting,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block **next_blockp,
		int force_iodone_flag,
		int *num_copiesp, int *mirror_nump,
		struct btrfs_disk_key *disk_key,
		u64 parent_generation)
{
	struct btrfsic_block *next_block = NULL;
	int ret;
	struct btrfsic_block_link *l;
	int did_alloc_block_link;
	int block_was_created;

	*next_blockp = NULL;
	if (0 == *num_copiesp) {
		*num_copiesp =
		    btrfs_num_copies(state->root->fs_info,
				     next_bytenr, state->metablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       next_bytenr, *num_copiesp);
		*mirror_nump = 1;
	}

	if (*mirror_nump > *num_copiesp)
		return 0;

	if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
		printk(KERN_INFO
		       "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
		       *mirror_nump);
	ret = btrfsic_map_block(state, next_bytenr,
				state->metablock_size,
				next_block_ctx, *mirror_nump);
	if (ret) {
		printk(KERN_INFO
		       "btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
		       next_bytenr, *mirror_nump);
		btrfsic_release_block_ctx(next_block_ctx);
		*next_blockp = NULL;
		return -1;
	}

	next_block = btrfsic_block_lookup_or_add(state,
						 next_block_ctx, "referenced ",
						 1, force_iodone_flag,
						 !force_iodone_flag,
						 *mirror_nump,
						 &block_was_created);
	if (NULL == next_block) {
		btrfsic_release_block_ctx(next_block_ctx);
		*next_blockp = NULL;
		return -1;
	}
	if (block_was_created) {
		l = NULL;
		next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
	} else {
		if (next_block->logical_bytenr != next_bytenr &&
		    !(!next_block->is_metadata &&
		      0 == next_block->logical_bytenr)) {
			printk(KERN_INFO
			       "Referenced block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
			       next_bytenr, next_block_ctx->dev->name,
			       next_block_ctx->dev_bytenr, *mirror_nump,
			       btrfsic_get_block_type(state, next_block),
			       next_block->logical_bytenr);
		} else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "Referenced block @%llu (%s/%llu/%d) found in hash table, %c.\n",
			       next_bytenr, next_block_ctx->dev->name,
			       next_block_ctx->dev_bytenr, *mirror_nump,
			       btrfsic_get_block_type(state, next_block));
		next_block->logical_bytenr = next_bytenr;

		next_block->mirror_num = *mirror_nump;
		l = btrfsic_block_link_hashtable_lookup(
				next_block_ctx->dev->bdev,
				next_block_ctx->dev_bytenr,
				block_ctx->dev->bdev,
				block_ctx->dev_bytenr,
				&state->block_link_hashtable);
	}

	next_block->disk_key = *disk_key;
	if (NULL == l) {
		l = btrfsic_block_link_alloc();
		if (NULL == l) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			btrfsic_release_block_ctx(next_block_ctx);
			*next_blockp = NULL;
			return -1;
		}

		did_alloc_block_link = 1;
		l->block_ref_to = next_block;
		l->block_ref_from = block;
		l->ref_cnt = 1;
		l->parent_generation = parent_generation;

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			btrfsic_print_add_link(state, l);

		list_add(&l->node_ref_to, &block->ref_to_list);
		list_add(&l->node_ref_from, &next_block->ref_from_list);

		btrfsic_block_link_hashtable_add(l,
						 &state->block_link_hashtable);
	} else {
		did_alloc_block_link = 0;
		if (0 == limit_nesting) {
			l->ref_cnt++;
			l->parent_generation = parent_generation;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_add_link(state, l);
		}
	}

	if (limit_nesting > 0 && did_alloc_block_link) {
		ret = btrfsic_read_block(state, next_block_ctx);
		if (ret < (int)next_block_ctx->len) {
			printk(KERN_INFO
			       "btrfsic: read block @logical %llu failed!\n",
			       next_bytenr);
			btrfsic_release_block_ctx(next_block_ctx);
			*next_blockp = NULL;
			return -1;
		}

		*next_blockp = next_block;
	} else {
		*next_blockp = NULL;
	}
	(*mirror_nump)++;

	return 0;
}

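/*
 * For a regular (non-inline) file extent item, register the referenced
 * data: the extent is split into datablock_size chunks and each chunk's
 * mirrors are added to the block database, linked to the leaf holding
 * the extent item. Compressed extents are tracked as a whole
 * (disk_num_bytes), uncompressed ones only in the used subrange
 * (offset/num_bytes).
 */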
static int btrfsic_handle_extent_data(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u32 item_offset, int force_iodone_flag)
{
	int ret;
	struct btrfs_file_extent_item file_extent_item;
	u64 file_extent_item_offset;
	u64 next_bytenr;
	u64 num_bytes;
	u64 generation;
	struct btrfsic_block_link *l;

	file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
				  item_offset;
	if (file_extent_item_offset +
	    offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
	    block_ctx->len) {
		printk(KERN_INFO
		       "btrfsic: file item out of bounds at logical %llu, dev %s\n",
		       block_ctx->start, block_ctx->dev->name);
		return -1;
	}

	btrfsic_read_from_block_data(block_ctx, &file_extent_item,
		file_extent_item_offset,
		offsetof(struct btrfs_file_extent_item, disk_num_bytes));
	if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
	    btrfs_stack_file_extent_disk_bytenr(&file_extent_item) == 0) {
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
			printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu\n",
			       file_extent_item.type,
			       btrfs_stack_file_extent_disk_bytenr(
			       &file_extent_item));
		return 0;
	}

	if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
	    block_ctx->len) {
		printk(KERN_INFO
		       "btrfsic: file item out of bounds at logical %llu, dev %s\n",
		       block_ctx->start, block_ctx->dev->name);
		return -1;
	}
	btrfsic_read_from_block_data(block_ctx, &file_extent_item,
				     file_extent_item_offset,
				     sizeof(struct btrfs_file_extent_item));
	next_bytenr = btrfs_stack_file_extent_disk_bytenr(&file_extent_item);
	if (btrfs_stack_file_extent_compression(&file_extent_item) ==
	    BTRFS_COMPRESS_NONE) {
		next_bytenr += btrfs_stack_file_extent_offset(&file_extent_item);
		num_bytes = btrfs_stack_file_extent_num_bytes(&file_extent_item);
	} else {
		num_bytes = btrfs_stack_file_extent_disk_num_bytes(&file_extent_item);
	}
	generation = btrfs_stack_file_extent_generation(&file_extent_item);

	if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
		printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu, offset = %llu, num_bytes = %llu\n",
		       file_extent_item.type,
		       btrfs_stack_file_extent_disk_bytenr(&file_extent_item),
		       btrfs_stack_file_extent_offset(&file_extent_item),
		       num_bytes);
	while (num_bytes > 0) {
		u32 chunk_len;
		int num_copies;
		int mirror_num;

		if (num_bytes > state->datablock_size)
			chunk_len = state->datablock_size;
		else
			chunk_len = num_bytes;

		num_copies =
		    btrfs_num_copies(state->root->fs_info,
				     next_bytenr, state->datablock_size);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block_data_ctx next_block_ctx;
			struct btrfsic_block *next_block;
			int block_was_created;

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "btrfsic_handle_extent_data(mirror_num=%d)\n",
				       mirror_num);
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
				printk(KERN_INFO
				       "\tdisk_bytenr = %llu, num_bytes %u\n",
				       next_bytenr, chunk_len);
			ret = btrfsic_map_block(state, next_bytenr,
						chunk_len, &next_block_ctx,
						mirror_num);
			if (ret) {
				printk(KERN_INFO
				       "btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
				       next_bytenr, mirror_num);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state,
					&next_block_ctx,
					"referenced ",
					0,
					force_iodone_flag,
					!force_iodone_flag,
					mirror_num,
					&block_was_created);
			if (NULL == next_block) {
				printk(KERN_INFO
				       "btrfsic: error, kmalloc failed!\n");
				btrfsic_release_block_ctx(&next_block_ctx);
				return -1;
			}
			if (!block_was_created) {
				if (next_block->logical_bytenr != next_bytenr &&
				    !(!next_block->is_metadata &&
				      0 == next_block->logical_bytenr)) {
					printk(KERN_INFO
					       "Referenced block @%llu (%s/%llu/%d) found in hash table, D, bytenr mismatch (!= stored %llu).\n",
					       next_bytenr,
					       next_block_ctx.dev->name,
					       next_block_ctx.dev_bytenr,
					       mirror_num,
					       next_block->logical_bytenr);
				}
				next_block->logical_bytenr = next_bytenr;
				next_block->mirror_num = mirror_num;
			}

			l = btrfsic_block_link_lookup_or_add(state,
							     &next_block_ctx,
							     next_block, block,
							     generation);
			btrfsic_release_block_ctx(&next_block_ctx);
			if (NULL == l)
				return -1;
		}

		next_bytenr += chunk_len;
		num_bytes -= chunk_len;
	}

	return 0;
}

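/*
 * Translate a logical byte number to a (device, physical byte number)
 * pair for the given mirror by asking btrfs_map_block() for the
 * mapping; only the first stripe of the returned mapping is used.
 */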
static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
			     struct btrfsic_block_data_ctx *block_ctx_out,
			     int mirror_num)
{
	int ret;
	u64 length;
	struct btrfs_bio *multi = NULL;
	struct btrfs_device *device;

	length = len;
	ret = btrfs_map_block(state->root->fs_info, READ,
			      bytenr, &length, &multi, mirror_num);

	if (ret) {
		block_ctx_out->start = 0;
		block_ctx_out->dev_bytenr = 0;
		block_ctx_out->len = 0;
		block_ctx_out->dev = NULL;
		block_ctx_out->datav = NULL;
		block_ctx_out->pagev = NULL;
		block_ctx_out->mem_to_free = NULL;

		return ret;
	}

	device = multi->stripes[0].dev;
	block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev);
	block_ctx_out->dev_bytenr = multi->stripes[0].physical;
	block_ctx_out->start = bytenr;
	block_ctx_out->len = len;
	block_ctx_out->datav = NULL;
	block_ctx_out->pagev = NULL;
	block_ctx_out->mem_to_free = NULL;

	kfree(multi);
	if (NULL == block_ctx_out->dev) {
		ret = -ENXIO;
		printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n");
	}

	return ret;
}

static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
				  u32 len, struct block_device *bdev,
				  struct btrfsic_block_data_ctx *block_ctx_out)
{
	block_ctx_out->dev = btrfsic_dev_state_lookup(bdev);
	block_ctx_out->dev_bytenr = bytenr;
	block_ctx_out->start = bytenr;
	block_ctx_out->len = len;
	block_ctx_out->datav = NULL;
	block_ctx_out->pagev = NULL;
	block_ctx_out->mem_to_free = NULL;
	if (NULL != block_ctx_out->dev) {
		return 0;
	} else {
		printk(KERN_INFO "btrfsic: error, cannot lookup dev (#2)!\n");
		return -ENXIO;
	}
}

static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
{
	if (block_ctx->mem_to_free) {
		unsigned int num_pages;

		BUG_ON(!block_ctx->datav);
		BUG_ON(!block_ctx->pagev);
		num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
			    PAGE_CACHE_SHIFT;
		while (num_pages > 0) {
			num_pages--;
			if (block_ctx->datav[num_pages]) {
				kunmap(block_ctx->pagev[num_pages]);
				block_ctx->datav[num_pages] = NULL;
			}
			if (block_ctx->pagev[num_pages]) {
				__free_page(block_ctx->pagev[num_pages]);
				block_ctx->pagev[num_pages] = NULL;
			}
		}

		kfree(block_ctx->mem_to_free);
		block_ctx->mem_to_free = NULL;
		block_ctx->pagev = NULL;
		block_ctx->datav = NULL;
	}
}

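/*
 * Read the block described by block_ctx from disk. A single kzalloc()
 * backs both pointer arrays: datav comes first, pagev follows it, and
 * mem_to_free remembers the combined allocation so that
 * btrfsic_release_block_ctx() can undo everything with one kfree().
 * Returns block_ctx->len on success, -1 on failure.
 */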
static int btrfsic_read_block(struct btrfsic_state *state,
			      struct btrfsic_block_data_ctx *block_ctx)
{
	unsigned int num_pages;
	unsigned int i;
	u64 dev_bytenr;
	int ret;

	BUG_ON(block_ctx->datav);
	BUG_ON(block_ctx->pagev);
	BUG_ON(block_ctx->mem_to_free);
	if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) {
		printk(KERN_INFO
		       "btrfsic: read_block() with unaligned bytenr %llu\n",
		       block_ctx->dev_bytenr);
		return -1;
	}

	num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
		    PAGE_CACHE_SHIFT;
	block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
					  sizeof(*block_ctx->pagev)) *
					 num_pages, GFP_NOFS);
	if (!block_ctx->mem_to_free)
		return -1;
	block_ctx->datav = block_ctx->mem_to_free;
	block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
	for (i = 0; i < num_pages; i++) {
		block_ctx->pagev[i] = alloc_page(GFP_NOFS);
		if (!block_ctx->pagev[i])
			return -1;
	}

	dev_bytenr = block_ctx->dev_bytenr;
	for (i = 0; i < num_pages;) {
		struct bio *bio;
		unsigned int j;

		bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
		if (!bio) {
			printk(KERN_INFO
			       "btrfsic: bio_alloc() for %u pages failed!\n",
			       num_pages - i);
			return -1;
		}
		bio->bi_bdev = block_ctx->dev->bdev;
		bio->bi_iter.bi_sector = dev_bytenr >> 9;

		for (j = i; j < num_pages; j++) {
			ret = bio_add_page(bio, block_ctx->pagev[j],
					   PAGE_CACHE_SIZE, 0);
			if (PAGE_CACHE_SIZE != ret)
				break;
		}
		if (j == i) {
			printk(KERN_INFO
			       "btrfsic: error, failed to add a single page!\n");
			return -1;
		}
		if (submit_bio_wait(READ, bio)) {
			printk(KERN_INFO
			       "btrfsic: read error at logical %llu dev %s!\n",
			       block_ctx->start, block_ctx->dev->name);
			bio_put(bio);
			return -1;
		}
		bio_put(bio);
		dev_bytenr += (j - i) * PAGE_CACHE_SIZE;
		i = j;
	}
	for (i = 0; i < num_pages; i++) {
		block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
		if (!block_ctx->datav[i]) {
			printk(KERN_INFO "btrfsic: kmap() failed (dev %s)!\n",
			       block_ctx->dev->name);
			return -1;
		}
	}

	return block_ctx->len;
}

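/*
 * Print the complete block database: every tracked block together with
 * its outgoing (ref_to) and incoming (ref_from) links; this is the
 * output selected by the BTRFSIC_PRINT_MASK_INITIAL_DATABASE mask.
 */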
static int btrfsic_read_block(struct btrfsic_state *state,
			      struct btrfsic_block_data_ctx *block_ctx)
{
	unsigned int num_pages;
	unsigned int i;
	u64 dev_bytenr;
	int ret;

	BUG_ON(block_ctx->datav);
	BUG_ON(block_ctx->pagev);
	BUG_ON(block_ctx->mem_to_free);
	if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) {
		printk(KERN_INFO
		       "btrfsic: read_block() with unaligned bytenr %llu\n",
		       block_ctx->dev_bytenr);
		return -1;
	}

	num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
		    PAGE_CACHE_SHIFT;
	block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
					  sizeof(*block_ctx->pagev)) *
					 num_pages, GFP_NOFS);
	if (!block_ctx->mem_to_free)
		return -1;
	block_ctx->datav = block_ctx->mem_to_free;
	block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
	for (i = 0; i < num_pages; i++) {
		block_ctx->pagev[i] = alloc_page(GFP_NOFS);
		if (!block_ctx->pagev[i])
			return -1;
	}

	dev_bytenr = block_ctx->dev_bytenr;
	for (i = 0; i < num_pages;) {
		struct bio *bio;
		unsigned int j;

		bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
		if (!bio) {
			printk(KERN_INFO
			       "btrfsic: bio_alloc() for %u pages failed!\n",
			       num_pages - i);
			return -1;
		}
		bio->bi_bdev = block_ctx->dev->bdev;
		bio->bi_iter.bi_sector = dev_bytenr >> 9;

		for (j = i; j < num_pages; j++) {
			ret = bio_add_page(bio, block_ctx->pagev[j],
					   PAGE_CACHE_SIZE, 0);
			if (PAGE_CACHE_SIZE != ret)
				break;
		}
		if (j == i) {
			printk(KERN_INFO
			       "btrfsic: error, failed to add a single page!\n");
			return -1;
		}
		if (submit_bio_wait(READ, bio)) {
			printk(KERN_INFO
			       "btrfsic: read error at logical %llu dev %s!\n",
			       block_ctx->start, block_ctx->dev->name);
			bio_put(bio);
			return -1;
		}
		bio_put(bio);
		dev_bytenr += (j - i) * PAGE_CACHE_SIZE;
		i = j;
	}
	for (i = 0; i < num_pages; i++) {
		block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
		if (!block_ctx->datav[i]) {
			printk(KERN_INFO "btrfsic: kmap() failed (dev %s)!\n",
			       block_ctx->dev->name);
			return -1;
		}
	}

	return block_ctx->len;
}
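/*
 * Dump the complete in-memory block database to the kernel log:
 * every known block together with its ref_to and ref_from links.
 * Triggered on mount via BTRFSIC_PRINT_MASK_INITIAL_DATABASE.
 */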
static void btrfsic_dump_database(struct btrfsic_state *state)
{
	struct list_head *elem_all;

	BUG_ON(NULL == state);

	printk(KERN_INFO "all_blocks_list:\n");
	list_for_each(elem_all, &state->all_blocks_list) {
		const struct btrfsic_block *const b_all =
		    list_entry(elem_all, struct btrfsic_block,
			       all_blocks_node);
		struct list_head *elem_ref_to;
		struct list_head *elem_ref_from;

		printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
		       btrfsic_get_block_type(state, b_all),
		       b_all->logical_bytenr, b_all->dev_state->name,
		       b_all->dev_bytenr, b_all->mirror_num);

		list_for_each(elem_ref_to, &b_all->ref_to_list) {
			const struct btrfsic_block_link *const l =
			    list_entry(elem_ref_to,
				       struct btrfsic_block_link,
				       node_ref_to);

			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
			       " refers %u* to"
			       " %c @%llu (%s/%llu/%d)\n",
			       btrfsic_get_block_type(state, b_all),
			       b_all->logical_bytenr, b_all->dev_state->name,
			       b_all->dev_bytenr, b_all->mirror_num,
			       l->ref_cnt,
			       btrfsic_get_block_type(state, l->block_ref_to),
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num);
		}

		list_for_each(elem_ref_from, &b_all->ref_from_list) {
			const struct btrfsic_block_link *const l =
			    list_entry(elem_ref_from,
				       struct btrfsic_block_link,
				       node_ref_from);

			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
			       " is ref %u* from"
			       " %c @%llu (%s/%llu/%d)\n",
			       btrfsic_get_block_type(state, b_all),
			       b_all->logical_bytenr, b_all->dev_state->name,
			       b_all->dev_bytenr, b_all->mirror_num,
			       l->ref_cnt,
			       btrfsic_get_block_type(state, l->block_ref_from),
			       l->block_ref_from->logical_bytenr,
			       l->block_ref_from->dev_state->name,
			       l->block_ref_from->dev_bytenr,
			       l->block_ref_from->mirror_num);
		}

		printk(KERN_INFO "\n");
	}
}

/*
 * Test whether the disk block contains a tree block (leaf or node)
 * (note that this test fails for the super block)
 */
static int btrfsic_test_for_metadata(struct btrfsic_state *state,
				     char **datav, unsigned int num_pages)
{
	struct btrfs_header *h;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	unsigned int i;

	if (num_pages * PAGE_CACHE_SIZE < state->metablock_size)
		return 1; /* not metadata */
	num_pages = state->metablock_size >> PAGE_CACHE_SHIFT;
	h = (struct btrfs_header *)datav[0];

	if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
		return 1;

	for (i = 0; i < num_pages; i++) {
		u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
		size_t sublen = i ? PAGE_CACHE_SIZE :
				    (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);

		crc = btrfs_crc32c(crc, data, sublen);
	}
	btrfs_csum_final(crc, csum);
	if (memcmp(csum, h->csum, state->csum_size))
		return 1;

	return 0; /* is metadata */
}
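/*
 * Handle one write request. The buffer may span several blocks; each
 * iteration of the loop below classifies one block as superblock,
 * metadata or data, updates the block database, complains if a block
 * that is still reachable from the most recent superblock (or whose
 * previous write is still in flight) is overwritten, and hooks the
 * completion callback so that the iodone state can be tracked.
 */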
static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
					  u64 dev_bytenr, char **mapped_datav,
					  unsigned int num_pages,
					  struct bio *bio, int *bio_is_patched,
					  struct buffer_head *bh,
					  int submit_bio_bh_rw)
{
	int is_metadata;
	struct btrfsic_block *block;
	struct btrfsic_block_data_ctx block_ctx;
	int ret;
	struct btrfsic_state *state = dev_state->state;
	struct block_device *bdev = dev_state->bdev;
	unsigned int processed_len;

	if (NULL != bio_is_patched)
		*bio_is_patched = 0;

again:
	if (num_pages == 0)
		return;

	processed_len = 0;
	is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav,
						      num_pages));

	block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
					       &state->block_hashtable);
	if (NULL != block) {
		u64 bytenr = 0;
		struct list_head *elem_ref_to;
		struct list_head *tmp_ref_to;

		if (block->is_superblock) {
			bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
						    mapped_datav[0]);
			if (num_pages * PAGE_CACHE_SIZE <
			    BTRFS_SUPER_INFO_SIZE) {
				printk(KERN_INFO
				       "btrfsic: cannot work with too short bios!\n");
				return;
			}
			is_metadata = 1;
			BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1));
			processed_len = BTRFS_SUPER_INFO_SIZE;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
				printk(KERN_INFO
				       "[before new superblock is written]:\n");
				btrfsic_dump_tree_sub(state, block, 0);
			}
		}
		if (is_metadata) {
			if (!block->is_superblock) {
				if (num_pages * PAGE_CACHE_SIZE <
				    state->metablock_size) {
					printk(KERN_INFO
					       "btrfsic: cannot work with too short bios!\n");
					return;
				}
				processed_len = state->metablock_size;
				bytenr = btrfs_stack_header_bytenr(
						(struct btrfs_header *)
						mapped_datav[0]);
				btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
							       dev_state,
							       dev_bytenr);
			}
			if (block->logical_bytenr != bytenr &&
			    !(!block->is_metadata &&
			      block->logical_bytenr == 0))
				printk(KERN_INFO
				       "Written block @%llu (%s/%llu/%d)"
				       " found in hash table, %c,"
				       " bytenr mismatch"
				       " (!= stored %llu).\n",
				       bytenr, dev_state->name, dev_bytenr,
				       block->mirror_num,
				       btrfsic_get_block_type(state, block),
				       block->logical_bytenr);
			else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "Written block @%llu (%s/%llu/%d)"
				       " found in hash table, %c.\n",
				       bytenr, dev_state->name, dev_bytenr,
				       block->mirror_num,
				       btrfsic_get_block_type(state, block));
			block->logical_bytenr = bytenr;
		} else {
			if (num_pages * PAGE_CACHE_SIZE <
			    state->datablock_size) {
				printk(KERN_INFO
				       "btrfsic: cannot work with too short bios!\n");
				return;
			}
			processed_len = state->datablock_size;
			bytenr = block->logical_bytenr;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "Written block @%llu (%s/%llu/%d)"
				       " found in hash table, %c.\n",
				       bytenr, dev_state->name, dev_bytenr,
				       block->mirror_num,
				       btrfsic_get_block_type(state, block));
		}

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "ref_to_list: %cE, ref_from_list: %cE\n",
			       list_empty(&block->ref_to_list) ? ' ' : '!',
			       list_empty(&block->ref_from_list) ? ' ' : '!');
		if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
			printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
			       " @%llu (%s/%llu/%d), old(gen=%llu,"
			       " objectid=%llu, type=%d, offset=%llu),"
			       " new(gen=%llu),"
			       " which is referenced by most recent superblock"
			       " (superblockgen=%llu)!\n",
			       btrfsic_get_block_type(state, block), bytenr,
			       dev_state->name, dev_bytenr, block->mirror_num,
			       block->generation,
			       btrfs_disk_key_objectid(&block->disk_key),
			       block->disk_key.type,
			       btrfs_disk_key_offset(&block->disk_key),
			       btrfs_stack_header_generation(
				       (struct btrfs_header *) mapped_datav[0]),
			       state->max_superblock_generation);
			btrfsic_dump_tree(state);
		}

		if (!block->is_iodone && !block->never_written) {
			printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
			       " @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu,"
			       " which is not yet iodone!\n",
			       btrfsic_get_block_type(state, block), bytenr,
			       dev_state->name, dev_bytenr, block->mirror_num,
			       block->generation,
			       btrfs_stack_header_generation(
				       (struct btrfs_header *)
				       mapped_datav[0]));
			/* it would not be safe to go on */
			btrfsic_dump_tree(state);
			goto continue_loop;
		}

		/*
		 * Clear all references of this block. Do not free
		 * the block itself even if it is not referenced anymore
		 * because it still carries valuable information
		 * like whether it was ever written and IO completed.
		 */
		list_for_each_safe(elem_ref_to, tmp_ref_to,
				   &block->ref_to_list) {
			struct btrfsic_block_link *const l =
			    list_entry(elem_ref_to,
				       struct btrfsic_block_link,
				       node_ref_to);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_rem_link(state, l);
			l->ref_cnt--;
			if (0 == l->ref_cnt) {
				list_del(&l->node_ref_to);
				list_del(&l->node_ref_from);
				btrfsic_block_link_hashtable_remove(l);
				btrfsic_block_link_free(l);
			}
		}

		if (block->is_superblock)
			ret = btrfsic_map_superblock(state, bytenr,
						     processed_len,
						     bdev, &block_ctx);
		else
			ret = btrfsic_map_block(state, bytenr, processed_len,
						&block_ctx, 0);
		if (ret) {
			printk(KERN_INFO
			       "btrfsic: btrfsic_map_block(root @%llu)"
			       " failed!\n", bytenr);
			goto continue_loop;
		}
		block_ctx.datav = mapped_datav;
		/* the following is required in case of writes to mirrors,
		 * use the same that was used for the lookup */
		block_ctx.dev = dev_state;
		block_ctx.dev_bytenr = dev_bytenr;

		if (is_metadata || state->include_extent_data) {
			block->never_written = 0;
			block->iodone_w_error = 0;
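			/*
			 * Steal the bio/bh completion callback so that write
			 * completion is observed: the first block in a bio
			 * saves the original bi_end_io/bi_private, later
			 * blocks of the same bio are chained via
			 * next_in_same_bio. btrfsic_bio_end_io() walks this
			 * chain and finally calls the saved callback.
			 */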
			if (NULL != bio) {
				block->is_iodone = 0;
				BUG_ON(NULL == bio_is_patched);
				if (!*bio_is_patched) {
					block->orig_bio_bh_private =
					    bio->bi_private;
					block->orig_bio_bh_end_io.bio =
					    bio->bi_end_io;
					block->next_in_same_bio = NULL;
					bio->bi_private = block;
					bio->bi_end_io = btrfsic_bio_end_io;
					*bio_is_patched = 1;
				} else {
					struct btrfsic_block *chained_block =
					    (struct btrfsic_block *)
					    bio->bi_private;

					BUG_ON(NULL == chained_block);
					block->orig_bio_bh_private =
					    chained_block->orig_bio_bh_private;
					block->orig_bio_bh_end_io.bio =
					    chained_block->orig_bio_bh_end_io.
					    bio;
					block->next_in_same_bio = chained_block;
					bio->bi_private = block;
				}
			} else if (NULL != bh) {
				block->is_iodone = 0;
				block->orig_bio_bh_private = bh->b_private;
				block->orig_bio_bh_end_io.bh = bh->b_end_io;
				block->next_in_same_bio = NULL;
				bh->b_private = block;
				bh->b_end_io = btrfsic_bh_end_io;
			} else {
				block->is_iodone = 1;
				block->orig_bio_bh_private = NULL;
				block->orig_bio_bh_end_io.bio = NULL;
				block->next_in_same_bio = NULL;
			}
		}

		block->flush_gen = dev_state->last_flush_gen + 1;
		block->submit_bio_bh_rw = submit_bio_bh_rw;
		if (is_metadata) {
			block->logical_bytenr = bytenr;
			block->is_metadata = 1;
			if (block->is_superblock) {
				BUG_ON(PAGE_CACHE_SIZE !=
				       BTRFS_SUPER_INFO_SIZE);
				ret = btrfsic_process_written_superblock(
						state,
						block,
						(struct btrfs_super_block *)
						mapped_datav[0]);
				if (state->print_mask &
				    BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
					printk(KERN_INFO
					       "[after new superblock is written]:\n");
					btrfsic_dump_tree_sub(state, block, 0);
				}
			} else {
				block->mirror_num = 0;	/* unknown */
				ret = btrfsic_process_metablock(
						state,
						block,
						&block_ctx,
						0, 0);
			}
			if (ret)
				printk(KERN_INFO
				       "btrfsic: btrfsic_process_metablock"
				       "(root @%llu) failed!\n",
				       dev_bytenr);
		} else {
			block->is_metadata = 0;
			block->mirror_num = 0;	/* unknown */
			block->generation = BTRFSIC_GENERATION_UNKNOWN;
			if (!state->include_extent_data
			    && list_empty(&block->ref_from_list)) {
				/*
				 * disk block is overwritten with extent
				 * data (not meta data) and we are configured
				 * to not include extent data: take the
				 * chance and free the block's memory
				 */
				btrfsic_block_hashtable_remove(block);
				list_del(&block->all_blocks_node);
				btrfsic_block_free(block);
			}
		}
		btrfsic_release_block_ctx(&block_ctx);
	} else {
		/* block has not been found in hash table */
		u64 bytenr;

		if (!is_metadata) {
			processed_len = state->datablock_size;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO "Written block (%s/%llu/?)"
				       " !found in hash table, D.\n",
				       dev_state->name, dev_bytenr);
			if (!state->include_extent_data) {
				/* ignore that written D block */
				goto continue_loop;
			}

			/* this is getting ugly for the
			 * include_extent_data case... */
			bytenr = 0;	/* unknown */
			block_ctx.start = bytenr;
			block_ctx.len = processed_len;
			block_ctx.mem_to_free = NULL;
			block_ctx.pagev = NULL;
		} else {
			processed_len = state->metablock_size;
			bytenr = btrfs_stack_header_bytenr(
					(struct btrfs_header *)
					mapped_datav[0]);
			btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
						       dev_bytenr);
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "Written block @%llu (%s/%llu/?)"
				       " !found in hash table, M.\n",
				       bytenr, dev_state->name, dev_bytenr);

			ret = btrfsic_map_block(state, bytenr, processed_len,
						&block_ctx, 0);
			if (ret) {
				printk(KERN_INFO
				       "btrfsic: btrfsic_map_block(root @%llu)"
				       " failed!\n",
				       dev_bytenr);
				goto continue_loop;
			}
		}
		block_ctx.datav = mapped_datav;
		/* the following is required in case of writes to mirrors,
		 * use the same that was used for the lookup */
		block_ctx.dev = dev_state;
		block_ctx.dev_bytenr = dev_bytenr;

		block = btrfsic_block_alloc();
		if (NULL == block) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			btrfsic_release_block_ctx(&block_ctx);
			goto continue_loop;
		}
		block->dev_state = dev_state;
		block->dev_bytenr = dev_bytenr;
		block->logical_bytenr = bytenr;
		block->is_metadata = is_metadata;
		block->never_written = 0;
		block->iodone_w_error = 0;
		block->mirror_num = 0;	/* unknown */
		block->flush_gen = dev_state->last_flush_gen + 1;
		block->submit_bio_bh_rw = submit_bio_bh_rw;
		if (NULL != bio) {
			block->is_iodone = 0;
			BUG_ON(NULL == bio_is_patched);
			if (!*bio_is_patched) {
				block->orig_bio_bh_private = bio->bi_private;
				block->orig_bio_bh_end_io.bio = bio->bi_end_io;
				block->next_in_same_bio = NULL;
				bio->bi_private = block;
				bio->bi_end_io = btrfsic_bio_end_io;
				*bio_is_patched = 1;
			} else {
				struct btrfsic_block *chained_block =
				    (struct btrfsic_block *)
				    bio->bi_private;

				BUG_ON(NULL == chained_block);
				block->orig_bio_bh_private =
				    chained_block->orig_bio_bh_private;
				block->orig_bio_bh_end_io.bio =
				    chained_block->orig_bio_bh_end_io.bio;
				block->next_in_same_bio = chained_block;
				bio->bi_private = block;
			}
		} else if (NULL != bh) {
			block->is_iodone = 0;
			block->orig_bio_bh_private = bh->b_private;
			block->orig_bio_bh_end_io.bh = bh->b_end_io;
			block->next_in_same_bio = NULL;
			bh->b_private = block;
			bh->b_end_io = btrfsic_bh_end_io;
		} else {
			block->is_iodone = 1;
			block->orig_bio_bh_private = NULL;
			block->orig_bio_bh_end_io.bio = NULL;
			block->next_in_same_bio = NULL;
		}
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "New written %c-block @%llu (%s/%llu/%d)\n",
			       is_metadata ? 'M' : 'D',
			       block->logical_bytenr, block->dev_state->name,
			       block->dev_bytenr, block->mirror_num);
		list_add(&block->all_blocks_node, &state->all_blocks_list);
		btrfsic_block_hashtable_add(block, &state->block_hashtable);

		if (is_metadata) {
			ret = btrfsic_process_metablock(state, block,
							&block_ctx, 0, 0);
			if (ret)
				printk(KERN_INFO
				       "btrfsic: process_metablock(root @%llu)"
				       " failed!\n",
				       dev_bytenr);
		}
		btrfsic_release_block_ctx(&block_ctx);
	}

continue_loop:
	BUG_ON(!processed_len);
	dev_bytenr += processed_len;
	mapped_datav += processed_len >> PAGE_CACHE_SHIFT;
	num_pages -= processed_len >> PAGE_CACHE_SHIFT;
	goto again;
}
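/*
 * Replacement bi_end_io: walk the chain of blocks written in this bio,
 * mark each as iodone, account FLUSH/FUA completion for the device,
 * then call the original completion callback.
 */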
static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
{
	struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private;
	int iodone_w_error;

	/* mutex is not held! This is not safe if IO is not yet completed
	 * on umount */
	iodone_w_error = 0;
	if (bio_error_status)
		iodone_w_error = 1;

	BUG_ON(NULL == block);
	bp->bi_private = block->orig_bio_bh_private;
	bp->bi_end_io = block->orig_bio_bh_end_io.bio;

	do {
		struct btrfsic_block *next_block;
		struct btrfsic_dev_state *const dev_state = block->dev_state;

		if ((dev_state->state->print_mask &
		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
			printk(KERN_INFO
			       "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
			       bio_error_status,
			       btrfsic_get_block_type(dev_state->state, block),
			       block->logical_bytenr, dev_state->name,
			       block->dev_bytenr, block->mirror_num);
		next_block = block->next_in_same_bio;
		block->iodone_w_error = iodone_w_error;
		if (block->submit_bio_bh_rw & REQ_FLUSH) {
			dev_state->last_flush_gen++;
			if ((dev_state->state->print_mask &
			     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
				printk(KERN_INFO
				       "bio_end_io() new %s flush_gen=%llu\n",
				       dev_state->name,
				       dev_state->last_flush_gen);
		}
		if (block->submit_bio_bh_rw & REQ_FUA)
			block->flush_gen = 0; /* FUA completed means block is
					       * on disk */
		block->is_iodone = 1; /* for FLUSH, this releases the block */
		block = next_block;
	} while (NULL != block);

	bp->bi_end_io(bp, bio_error_status);
}

static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
{
	struct btrfsic_block *block = (struct btrfsic_block *)bh->b_private;
	int iodone_w_error = !uptodate;
	struct btrfsic_dev_state *dev_state;

	BUG_ON(NULL == block);
	dev_state = block->dev_state;
	if ((dev_state->state->print_mask & BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
		printk(KERN_INFO
		       "bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
		       iodone_w_error,
		       btrfsic_get_block_type(dev_state->state, block),
		       block->logical_bytenr, block->dev_state->name,
		       block->dev_bytenr, block->mirror_num);

	block->iodone_w_error = iodone_w_error;
	if (block->submit_bio_bh_rw & REQ_FLUSH) {
		dev_state->last_flush_gen++;
		if ((dev_state->state->print_mask &
		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
			printk(KERN_INFO
			       "bh_end_io() new %s flush_gen=%llu\n",
			       dev_state->name, dev_state->last_flush_gen);
	}
	if (block->submit_bio_bh_rw & REQ_FUA)
		block->flush_gen = 0; /* FUA completed means block is on disk */

	bh->b_private = block->orig_bio_bh_private;
	bh->b_end_io = block->orig_bio_bh_end_io.bh;
	block->is_iodone = 1; /* for FLUSH, this releases the block */
	bh->b_end_io(bh, uptodate);
}
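/*
 * A superblock was written: remember the highest generation seen so
 * far and, for each of the root/chunk/log tree roots and all their
 * mirrors, (re)establish a link from the superblock block so that
 * btrfsic_check_all_ref_blocks() can verify the reachable tree.
 */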
static int btrfsic_process_written_superblock(
		struct btrfsic_state *state,
		struct btrfsic_block *const superblock,
		struct btrfs_super_block *const super_hdr)
{
	int pass;

	superblock->generation = btrfs_super_generation(super_hdr);
	if (!(superblock->generation > state->max_superblock_generation ||
	      0 == state->max_superblock_generation)) {
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			printk(KERN_INFO
			       "btrfsic: superblock @%llu (%s/%llu/%d)"
			       " with old gen %llu <= %llu\n",
			       superblock->logical_bytenr,
			       superblock->dev_state->name,
			       superblock->dev_bytenr, superblock->mirror_num,
			       btrfs_super_generation(super_hdr),
			       state->max_superblock_generation);
	} else {
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			printk(KERN_INFO
			       "btrfsic: got new superblock @%llu (%s/%llu/%d)"
			       " with new gen %llu > %llu\n",
			       superblock->logical_bytenr,
			       superblock->dev_state->name,
			       superblock->dev_bytenr, superblock->mirror_num,
			       btrfs_super_generation(super_hdr),
			       state->max_superblock_generation);

		state->max_superblock_generation =
		    btrfs_super_generation(super_hdr);
		state->latest_superblock = superblock;
	}

	for (pass = 0; pass < 3; pass++) {
		int ret;
		u64 next_bytenr;
		struct btrfsic_block *next_block;
		struct btrfsic_block_data_ctx tmp_next_block_ctx;
		struct btrfsic_block_link *l;
		int num_copies;
		int mirror_num;
		const char *additional_string = NULL;
		struct btrfs_disk_key tmp_disk_key = {0};

		btrfs_set_disk_key_type(&tmp_disk_key,
					BTRFS_ROOT_ITEM_KEY);
		btrfs_set_disk_key_offset(&tmp_disk_key, 0);

		switch (pass) {
		case 0:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_ROOT_TREE_OBJECTID);
			additional_string = "root ";
			next_bytenr = btrfs_super_root(super_hdr);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "root@%llu\n", next_bytenr);
			break;
		case 1:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_CHUNK_TREE_OBJECTID);
			additional_string = "chunk ";
			next_bytenr = btrfs_super_chunk_root(super_hdr);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "chunk@%llu\n", next_bytenr);
			break;
		case 2:
			btrfs_set_disk_key_objectid(&tmp_disk_key,
						    BTRFS_TREE_LOG_OBJECTID);
			additional_string = "log ";
			next_bytenr = btrfs_super_log_root(super_hdr);
			if (0 == next_bytenr)
				continue;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "log@%llu\n", next_bytenr);
			break;
		}

		num_copies =
		    btrfs_num_copies(state->root->fs_info,
				     next_bytenr, BTRFS_SUPER_INFO_SIZE);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			int was_created;

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "btrfsic_process_written_superblock("
				       "mirror_num=%d)\n", mirror_num);
			ret = btrfsic_map_block(state, next_bytenr,
						BTRFS_SUPER_INFO_SIZE,
						&tmp_next_block_ctx,
						mirror_num);
			if (ret) {
				printk(KERN_INFO
				       "btrfsic: btrfsic_map_block(@%llu,"
				       " mirror=%d) failed!\n",
				       next_bytenr, mirror_num);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state,
					&tmp_next_block_ctx,
					additional_string,
					1, 0, 1,
					mirror_num,
					&was_created);
			if (NULL == next_block) {
				printk(KERN_INFO
				       "btrfsic: error, kmalloc failed!\n");
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				return -1;
			}

			next_block->disk_key = tmp_disk_key;
			if (was_created)
				next_block->generation =
				    BTRFSIC_GENERATION_UNKNOWN;
			l = btrfsic_block_link_lookup_or_add(
					state,
					&tmp_next_block_ctx,
					next_block,
					superblock,
					BTRFSIC_GENERATION_UNKNOWN);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
			if (NULL == l)
				return -1;
		}
	}

	if (WARN_ON(-1 == btrfsic_check_all_ref_blocks(state, superblock, 0)))
		btrfsic_dump_tree(state);

	return 0;
}
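/*
 * Recursively verify that every block reachable from @block has been
 * written, completed without error, carries the generation its parent
 * expects, and has been flushed out of the device's write cache.
 * Returns 0 if the reachable tree is consistent, -1 otherwise.
 */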
static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
					struct btrfsic_block *const block,
					int recursion_level)
{
	struct list_head *elem_ref_to;
	int ret = 0;

	if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
		/*
		 * Note that this situation can happen and does not
		 * indicate an error in regular cases. It happens
		 * when disk blocks are freed and later reused.
		 * The check-integrity module is not aware of any
		 * block free operations, it just recognizes block
		 * write operations. Therefore it keeps the linkage
		 * information for a block until a block is
		 * rewritten. This can temporarily cause incorrect
		 * and even circular linkage information. This
		 * causes no harm unless such blocks are referenced
		 * by the most recent super block.
		 */
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "btrfsic: abort cyclic linkage (case 1).\n");

		return ret;
	}

	/*
	 * This algorithm is recursive because the amount of used stack
	 * space is very small and the max recursion depth is limited.
	 */
	list_for_each(elem_ref_to, &block->ref_to_list) {
		const struct btrfsic_block_link *const l =
		    list_entry(elem_ref_to, struct btrfsic_block_link,
			       node_ref_to);

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "rl=%d, %c @%llu (%s/%llu/%d)"
			       " %u* refers to %c @%llu (%s/%llu/%d)\n",
			       recursion_level,
			       btrfsic_get_block_type(state, block),
			       block->logical_bytenr, block->dev_state->name,
			       block->dev_bytenr, block->mirror_num,
			       l->ref_cnt,
			       btrfsic_get_block_type(state, l->block_ref_to),
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num);
		if (l->block_ref_to->never_written) {
			printk(KERN_INFO "btrfs: attempt to write superblock"
			       " which references block %c @%llu (%s/%llu/%d)"
			       " which is never written!\n",
			       btrfsic_get_block_type(state, l->block_ref_to),
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num);
			ret = -1;
		} else if (!l->block_ref_to->is_iodone) {
			printk(KERN_INFO "btrfs: attempt to write superblock"
			       " which references block %c @%llu (%s/%llu/%d)"
			       " which is not yet iodone!\n",
			       btrfsic_get_block_type(state, l->block_ref_to),
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num);
			ret = -1;
		} else if (l->block_ref_to->iodone_w_error) {
			printk(KERN_INFO "btrfs: attempt to write superblock"
			       " which references block %c @%llu (%s/%llu/%d)"
			       " which has write error!\n",
			       btrfsic_get_block_type(state, l->block_ref_to),
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num);
			ret = -1;
		} else if (l->parent_generation !=
			   l->block_ref_to->generation &&
			   BTRFSIC_GENERATION_UNKNOWN !=
			   l->parent_generation &&
			   BTRFSIC_GENERATION_UNKNOWN !=
			   l->block_ref_to->generation) {
			printk(KERN_INFO "btrfs: attempt to write superblock"
			       " which references block %c @%llu (%s/%llu/%d)"
			       " with generation %llu !="
			       " parent generation %llu!\n",
			       btrfsic_get_block_type(state, l->block_ref_to),
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num,
			       l->block_ref_to->generation,
			       l->parent_generation);
			ret = -1;
		} else if (l->block_ref_to->flush_gen >
			   l->block_ref_to->dev_state->last_flush_gen) {
			printk(KERN_INFO "btrfs: attempt to write superblock"
			       " which references block %c @%llu (%s/%llu/%d)"
			       " which is not flushed out of disk's write cache"
			       " (block flush_gen=%llu,"
			       " dev->flush_gen=%llu)!\n",
			       btrfsic_get_block_type(state, l->block_ref_to),
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num, block->flush_gen,
			       l->block_ref_to->dev_state->last_flush_gen);
			ret = -1;
		} else if (-1 == btrfsic_check_all_ref_blocks(state,
							      l->block_ref_to,
							      recursion_level +
							      1)) {
			ret = -1;
		}
	}

	return ret;
}
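/*
 * Walk the ref_from links upwards and return 1 if @block is (directly
 * or indirectly) referenced by the most recently written superblock.
 */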
static int btrfsic_is_block_ref_by_superblock(
		const struct btrfsic_state *state,
		const struct btrfsic_block *block,
		int recursion_level)
{
	struct list_head *elem_ref_from;

	if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
		/* refer to comment at "abort cyclic linkage (case 1)" */
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "btrfsic: abort cyclic linkage (case 2).\n");

		return 0;
	}

	/*
	 * This algorithm is recursive because the amount of used stack space
	 * is very small and the max recursion depth is limited.
	 */
	list_for_each(elem_ref_from, &block->ref_from_list) {
		const struct btrfsic_block_link *const l =
		    list_entry(elem_ref_from, struct btrfsic_block_link,
			       node_ref_from);

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "rl=%d, %c @%llu (%s/%llu/%d)"
			       " is ref %u* from %c @%llu (%s/%llu/%d)\n",
			       recursion_level,
			       btrfsic_get_block_type(state, block),
			       block->logical_bytenr, block->dev_state->name,
			       block->dev_bytenr, block->mirror_num,
			       l->ref_cnt,
			       btrfsic_get_block_type(state, l->block_ref_from),
			       l->block_ref_from->logical_bytenr,
			       l->block_ref_from->dev_state->name,
			       l->block_ref_from->dev_bytenr,
			       l->block_ref_from->mirror_num);
		if (l->block_ref_from->is_superblock &&
		    state->latest_superblock->dev_bytenr ==
		    l->block_ref_from->dev_bytenr &&
		    state->latest_superblock->dev_state->bdev ==
		    l->block_ref_from->dev_state->bdev)
			return 1;
		else if (btrfsic_is_block_ref_by_superblock(state,
							    l->block_ref_from,
							    recursion_level +
							    1))
			return 1;
	}

	return 0;
}

static void btrfsic_print_add_link(const struct btrfsic_state *state,
				   const struct btrfsic_block_link *l)
{
	printk(KERN_INFO
	       "Add %u* link from %c @%llu (%s/%llu/%d)"
	       " to %c @%llu (%s/%llu/%d).\n",
	       l->ref_cnt,
	       btrfsic_get_block_type(state, l->block_ref_from),
	       l->block_ref_from->logical_bytenr,
	       l->block_ref_from->dev_state->name,
	       l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num,
	       btrfsic_get_block_type(state, l->block_ref_to),
	       l->block_ref_to->logical_bytenr,
	       l->block_ref_to->dev_state->name, l->block_ref_to->dev_bytenr,
	       l->block_ref_to->mirror_num);
}

static void btrfsic_print_rem_link(const struct btrfsic_state *state,
				   const struct btrfsic_block_link *l)
{
	printk(KERN_INFO
	       "Rem %u* link from %c @%llu (%s/%llu/%d)"
	       " to %c @%llu (%s/%llu/%d).\n",
	       l->ref_cnt,
	       btrfsic_get_block_type(state, l->block_ref_from),
	       l->block_ref_from->logical_bytenr,
	       l->block_ref_from->dev_state->name,
	       l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num,
	       btrfsic_get_block_type(state, l->block_ref_to),
	       l->block_ref_to->logical_bytenr,
	       l->block_ref_to->dev_state->name, l->block_ref_to->dev_bytenr,
	       l->block_ref_to->mirror_num);
}

static char btrfsic_get_block_type(const struct btrfsic_state *state,
				   const struct btrfsic_block *block)
{
	if (block->is_superblock &&
	    state->latest_superblock->dev_bytenr == block->dev_bytenr &&
	    state->latest_superblock->dev_state->bdev == block->dev_state->bdev)
		return 'S';
	else if (block->is_superblock)
		return 's';
	else if (block->is_metadata)
		return 'M';
	else
		return 'D';
}

static void btrfsic_dump_tree(const struct btrfsic_state *state)
{
	btrfsic_dump_tree_sub(state, state->latest_superblock, 0);
}

static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
				  const struct btrfsic_block *block,
				  int indent_level)
{
	struct list_head *elem_ref_to;
	int indent_add;
	static char buf[80];
	int cursor_position;

	/*
	 * It would be better to fill an on-stack buffer with a complete
	 * line and dump it at once when it is time to print a newline
	 * character.
	 */

	/*
	 * This algorithm is recursive because the amount of used stack space
	 * is very small and the max recursion depth is limited.
	 */
	indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)",
			     btrfsic_get_block_type(state, block),
			     block->logical_bytenr, block->dev_state->name,
			     block->dev_bytenr, block->mirror_num);
	if (indent_level + indent_add > BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
		printk("[...]\n");
		return;
	}
	printk(buf);
	indent_level += indent_add;
	if (list_empty(&block->ref_to_list)) {
		printk("\n");
		return;
	}
	if (block->mirror_num > 1 &&
	    !(state->print_mask & BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS)) {
		printk(" [...]\n");
		return;
	}

	cursor_position = indent_level;
	list_for_each(elem_ref_to, &block->ref_to_list) {
		const struct btrfsic_block_link *const l =
		    list_entry(elem_ref_to, struct btrfsic_block_link,
			       node_ref_to);

		while (cursor_position < indent_level) {
			printk(" ");
			cursor_position++;
		}
		if (l->ref_cnt > 1)
			indent_add = sprintf(buf, " %d*--> ", l->ref_cnt);
		else
			indent_add = sprintf(buf, " --> ");
		if (indent_level + indent_add >
		    BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
			printk("[...]\n");
			cursor_position = 0;
			continue;
		}

		printk(buf);

		btrfsic_dump_tree_sub(state, l->block_ref_to,
				      indent_level + indent_add);
		cursor_position = 0;
	}
}
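/*
 * Look up the link from_block -> next_block in the link hash table;
 * create it if it does not exist yet, otherwise just bump ref_cnt.
 * The parent generation is refreshed in either case.
 */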
2505 */ 2506 list_for_each(elem_ref_to, &block->ref_to_list) { 2507 const struct btrfsic_block_link *const l = 2508 list_entry(elem_ref_to, struct btrfsic_block_link, 2509 node_ref_to); 2510 2511 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2512 printk(KERN_INFO 2513 "rl=%d, %c @%llu (%s/%llu/%d)" 2514 " %u* refers to %c @%llu (%s/%llu/%d)\n", 2515 recursion_level, 2516 btrfsic_get_block_type(state, block), 2517 block->logical_bytenr, block->dev_state->name, 2518 block->dev_bytenr, block->mirror_num, 2519 l->ref_cnt, 2520 btrfsic_get_block_type(state, l->block_ref_to), 2521 l->block_ref_to->logical_bytenr, 2522 l->block_ref_to->dev_state->name, 2523 l->block_ref_to->dev_bytenr, 2524 l->block_ref_to->mirror_num); 2525 if (l->block_ref_to->never_written) { 2526 printk(KERN_INFO "btrfs: attempt to write superblock" 2527 " which references block %c @%llu (%s/%llu/%d)" 2528 " which is never written!\n", 2529 btrfsic_get_block_type(state, l->block_ref_to), 2530 l->block_ref_to->logical_bytenr, 2531 l->block_ref_to->dev_state->name, 2532 l->block_ref_to->dev_bytenr, 2533 l->block_ref_to->mirror_num); 2534 ret = -1; 2535 } else if (!l->block_ref_to->is_iodone) { 2536 printk(KERN_INFO "btrfs: attempt to write superblock" 2537 " which references block %c @%llu (%s/%llu/%d)" 2538 " which is not yet iodone!\n", 2539 btrfsic_get_block_type(state, l->block_ref_to), 2540 l->block_ref_to->logical_bytenr, 2541 l->block_ref_to->dev_state->name, 2542 l->block_ref_to->dev_bytenr, 2543 l->block_ref_to->mirror_num); 2544 ret = -1; 2545 } else if (l->block_ref_to->iodone_w_error) { 2546 printk(KERN_INFO "btrfs: attempt to write superblock" 2547 " which references block %c @%llu (%s/%llu/%d)" 2548 " which has write error!\n", 2549 btrfsic_get_block_type(state, l->block_ref_to), 2550 l->block_ref_to->logical_bytenr, 2551 l->block_ref_to->dev_state->name, 2552 l->block_ref_to->dev_bytenr, 2553 l->block_ref_to->mirror_num); 2554 ret = -1; 2555 } else if (l->parent_generation != 2556 l->block_ref_to->generation && 2557 BTRFSIC_GENERATION_UNKNOWN != 2558 l->parent_generation && 2559 BTRFSIC_GENERATION_UNKNOWN != 2560 l->block_ref_to->generation) { 2561 printk(KERN_INFO "btrfs: attempt to write superblock" 2562 " which references block %c @%llu (%s/%llu/%d)" 2563 " with generation %llu !=" 2564 " parent generation %llu!\n", 2565 btrfsic_get_block_type(state, l->block_ref_to), 2566 l->block_ref_to->logical_bytenr, 2567 l->block_ref_to->dev_state->name, 2568 l->block_ref_to->dev_bytenr, 2569 l->block_ref_to->mirror_num, 2570 l->block_ref_to->generation, 2571 l->parent_generation); 2572 ret = -1; 2573 } else if (l->block_ref_to->flush_gen > 2574 l->block_ref_to->dev_state->last_flush_gen) { 2575 printk(KERN_INFO "btrfs: attempt to write superblock" 2576 " which references block %c @%llu (%s/%llu/%d)" 2577 " which is not flushed out of disk's write cache" 2578 " (block flush_gen=%llu," 2579 " dev->flush_gen=%llu)!\n", 2580 btrfsic_get_block_type(state, l->block_ref_to), 2581 l->block_ref_to->logical_bytenr, 2582 l->block_ref_to->dev_state->name, 2583 l->block_ref_to->dev_bytenr, 2584 l->block_ref_to->mirror_num, block->flush_gen, 2585 l->block_ref_to->dev_state->last_flush_gen); 2586 ret = -1; 2587 } else if (-1 == btrfsic_check_all_ref_blocks(state, 2588 l->block_ref_to, 2589 recursion_level + 2590 1)) { 2591 ret = -1; 2592 } 2593 } 2594 2595 return ret; 2596 } 2597 2598 static int btrfsic_is_block_ref_by_superblock( 2599 const struct btrfsic_state *state, 2600 const struct btrfsic_block *block, 2601 int 
static struct btrfsic_block *btrfsic_block_lookup_or_add(
		struct btrfsic_state *state,
		struct btrfsic_block_data_ctx *block_ctx,
		const char *additional_string,
		int is_metadata,
		int is_iodone,
		int never_written,
		int mirror_num,
		int *was_created)
{
	struct btrfsic_block *block;

	block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev,
					       block_ctx->dev_bytenr,
					       &state->block_hashtable);
	if (NULL == block) {
		struct btrfsic_dev_state *dev_state;

		block = btrfsic_block_alloc();
		if (NULL == block) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			return NULL;
		}
		dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev);
		if (NULL == dev_state) {
			printk(KERN_INFO
			       "btrfsic: error, lookup dev_state failed!\n");
			btrfsic_block_free(block);
			return NULL;
		}
		block->dev_state = dev_state;
		block->dev_bytenr = block_ctx->dev_bytenr;
		block->logical_bytenr = block_ctx->start;
		block->is_metadata = is_metadata;
		block->is_iodone = is_iodone;
		block->never_written = never_written;
		block->mirror_num = mirror_num;
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "New %s%c-block @%llu (%s/%llu/%d)\n",
			       additional_string,
			       btrfsic_get_block_type(state, block),
			       block->logical_bytenr, dev_state->name,
			       block->dev_bytenr, mirror_num);
		list_add(&block->all_blocks_node, &state->all_blocks_list);
		btrfsic_block_hashtable_add(block, &state->block_hashtable);
		if (NULL != was_created)
			*was_created = 1;
	} else {
		if (NULL != was_created)
			*was_created = 0;
	}

	return block;
}
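/*
 * Cross-check that the logical bytenr stored in a written metadata
 * block really maps to the device and physical offset that the bio
 * was submitted to; complain loudly if no mirror matches.
 */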
static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
					   u64 bytenr,
					   struct btrfsic_dev_state *dev_state,
					   u64 dev_bytenr)
{
	int num_copies;
	int mirror_num;
	int ret;
	struct btrfsic_block_data_ctx block_ctx;
	int match = 0;

	num_copies = btrfs_num_copies(state->root->fs_info,
				      bytenr, state->metablock_size);

	for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
		ret = btrfsic_map_block(state, bytenr, state->metablock_size,
					&block_ctx, mirror_num);
		if (ret) {
			printk(KERN_INFO "btrfsic:"
			       " btrfsic_map_block(logical @%llu,"
			       " mirror %d) failed!\n",
			       bytenr, mirror_num);
			continue;
		}

		if (dev_state->bdev == block_ctx.dev->bdev &&
		    dev_bytenr == block_ctx.dev_bytenr) {
			match++;
			btrfsic_release_block_ctx(&block_ctx);
			break;
		}
		btrfsic_release_block_ctx(&block_ctx);
	}

	if (WARN_ON(!match)) {
		printk(KERN_INFO "btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio,"
		       " buffer->log_bytenr=%llu, submit_bio(bdev=%s,"
		       " phys_bytenr=%llu)!\n",
		       bytenr, dev_state->name, dev_bytenr);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			ret = btrfsic_map_block(state, bytenr,
						state->metablock_size,
						&block_ctx, mirror_num);
			if (ret)
				continue;

			printk(KERN_INFO "Read logical bytenr @%llu maps to"
			       " (%s/%llu/%d)\n",
			       bytenr, block_ctx.dev->name,
			       block_ctx.dev_bytenr, mirror_num);
		}
	}
}

static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
		struct block_device *bdev)
{
	struct btrfsic_dev_state *ds;

	ds = btrfsic_dev_state_hashtable_lookup(bdev,
						&btrfsic_dev_state_hashtable);
	return ds;
}
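/*
 * External entry point: pass a buffer_head based write through the
 * checker before handing it to submit_bh(). Requests on devices
 * without a registered dev_state pass through untouched.
 */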
int btrfsic_submit_bh(int rw, struct buffer_head *bh)
{
	struct btrfsic_dev_state *dev_state;

	if (!btrfsic_is_initialized)
		return submit_bh(rw, bh);

	mutex_lock(&btrfsic_mutex);
	/* since btrfsic_submit_bh() might also be called before
	 * btrfsic_mount(), this might return NULL */
	dev_state = btrfsic_dev_state_lookup(bh->b_bdev);

	/* Only called to write the superblock (incl. FLUSH/FUA) */
	if (NULL != dev_state &&
	    (rw & WRITE) && bh->b_size > 0) {
		u64 dev_bytenr;

		dev_bytenr = 4096 * bh->b_blocknr;
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bh(rw=0x%x, blocknr=%llu (bytenr %llu),"
			       " size=%zu, data=%p, bdev=%p)\n",
			       rw, (unsigned long long)bh->b_blocknr,
			       dev_bytenr, bh->b_size, bh->b_data, bh->b_bdev);
		btrfsic_process_written_block(dev_state, dev_bytenr,
					      &bh->b_data, 1, NULL,
					      NULL, bh, rw);
	} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bh(rw=0x%x FLUSH, bdev=%p)\n",
			       rw, bh->b_bdev);
		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
			if ((dev_state->state->print_mask &
			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			      BTRFSIC_PRINT_MASK_VERBOSE)))
				printk(KERN_INFO
				       "btrfsic_submit_bh(%s) with FLUSH"
				       " but dummy block already in use"
				       " (ignored)!\n",
				       dev_state->name);
		} else {
			struct btrfsic_block *const block =
				&dev_state->dummy_block_for_bio_bh_flush;

			block->is_iodone = 0;
			block->never_written = 0;
			block->iodone_w_error = 0;
			block->flush_gen = dev_state->last_flush_gen + 1;
			block->submit_bio_bh_rw = rw;
			block->orig_bio_bh_private = bh->b_private;
			block->orig_bio_bh_end_io.bh = bh->b_end_io;
			block->next_in_same_bio = NULL;
			bh->b_private = block;
			bh->b_end_io = btrfsic_bh_end_io;
		}
	}
	mutex_unlock(&btrfsic_mutex);
	return submit_bh(rw, bh);
}
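/*
 * Examine a bio before it is submitted: write payloads are kmapped
 * page by page and fed to btrfsic_process_written_block(); pure FLUSH
 * requests are tracked via the per-device dummy block.
 */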
static void __btrfsic_submit_bio(int rw, struct bio *bio)
{
	struct btrfsic_dev_state *dev_state;

	if (!btrfsic_is_initialized)
		return;

	mutex_lock(&btrfsic_mutex);
	/* since btrfsic_submit_bio() is also called before
	 * btrfsic_mount(), this might return NULL */
	dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
	if (NULL != dev_state &&
	    (rw & WRITE) && NULL != bio->bi_io_vec) {
		unsigned int i;
		u64 dev_bytenr;
		u64 cur_bytenr;
		int bio_is_patched;
		char **mapped_datav;

		dev_bytenr = 512 * bio->bi_iter.bi_sector;
		bio_is_patched = 0;
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bio(rw=0x%x, bi_vcnt=%u,"
			       " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
			       rw, bio->bi_vcnt,
			       (unsigned long long)bio->bi_iter.bi_sector,
			       dev_bytenr, bio->bi_bdev);

		mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
				       GFP_NOFS);
		if (!mapped_datav)
			goto leave;
		cur_bytenr = dev_bytenr;
		for (i = 0; i < bio->bi_vcnt; i++) {
			BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE);
			mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
			if (!mapped_datav[i]) {
				while (i > 0) {
					i--;
					kunmap(bio->bi_io_vec[i].bv_page);
				}
				kfree(mapped_datav);
				goto leave;
			}
			if (dev_state->state->print_mask &
			    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
				printk(KERN_INFO
				       "#%u: bytenr=%llu, len=%u, offset=%u\n",
				       i, cur_bytenr, bio->bi_io_vec[i].bv_len,
				       bio->bi_io_vec[i].bv_offset);
			cur_bytenr += bio->bi_io_vec[i].bv_len;
		}
		btrfsic_process_written_block(dev_state, dev_bytenr,
					      mapped_datav, bio->bi_vcnt,
					      bio, &bio_is_patched,
					      NULL, rw);
		while (i > 0) {
			i--;
			kunmap(bio->bi_io_vec[i].bv_page);
		}
		kfree(mapped_datav);
	} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bio(rw=0x%x FLUSH, bdev=%p)\n",
			       rw, bio->bi_bdev);
		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
			if ((dev_state->state->print_mask &
			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			      BTRFSIC_PRINT_MASK_VERBOSE)))
				printk(KERN_INFO
				       "btrfsic_submit_bio(%s) with FLUSH"
				       " but dummy block already in use"
				       " (ignored)!\n",
				       dev_state->name);
		} else {
			struct btrfsic_block *const block =
				&dev_state->dummy_block_for_bio_bh_flush;

			block->is_iodone = 0;
			block->never_written = 0;
			block->iodone_w_error = 0;
			block->flush_gen = dev_state->last_flush_gen + 1;
			block->submit_bio_bh_rw = rw;
			block->orig_bio_bh_private = bio->bi_private;
			block->orig_bio_bh_end_io.bio = bio->bi_end_io;
			block->next_in_same_bio = NULL;
			bio->bi_private = block;
			bio->bi_end_io = btrfsic_bio_end_io;
		}
	}
leave:
	mutex_unlock(&btrfsic_mutex);
}

void btrfsic_submit_bio(int rw, struct bio *bio)
{
	__btrfsic_submit_bio(rw, bio);
	submit_bio(rw, bio);
}

int btrfsic_submit_bio_wait(int rw, struct bio *bio)
{
	__btrfsic_submit_bio(rw, bio);
	return submit_bio_wait(rw, bio);
}
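/*
 * Set up the global checker state for this filesystem: one dev_state
 * per device, empty hash tables, and an initial block database built
 * by btrfsic_process_superblock(). Nodesize and sectorsize must be
 * multiples of PAGE_CACHE_SIZE.
 */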
int btrfsic_mount(struct btrfs_root *root,
		  struct btrfs_fs_devices *fs_devices,
		  int including_extent_data, u32 print_mask)
{
	int ret;
	struct btrfsic_state *state;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;

	if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) {
		printk(KERN_INFO
		       "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
		       root->nodesize, PAGE_CACHE_SIZE);
		return -1;
	}
	if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) {
		printk(KERN_INFO
		       "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
		       root->sectorsize, PAGE_CACHE_SIZE);
		return -1;
	}
	state = kzalloc(sizeof(*state), GFP_NOFS);
	if (NULL == state) {
		printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n");
		return -1;
	}

	if (!btrfsic_is_initialized) {
		mutex_init(&btrfsic_mutex);
		btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable);
		btrfsic_is_initialized = 1;
	}
	mutex_lock(&btrfsic_mutex);
	state->root = root;
	state->print_mask = print_mask;
	state->include_extent_data = including_extent_data;
	state->csum_size = 0;
	state->metablock_size = root->nodesize;
	state->datablock_size = root->sectorsize;
	INIT_LIST_HEAD(&state->all_blocks_list);
	btrfsic_block_hashtable_init(&state->block_hashtable);
	btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
	state->max_superblock_generation = 0;
	state->latest_superblock = NULL;

	list_for_each_entry(device, dev_head, dev_list) {
		struct btrfsic_dev_state *ds;
		char *p;

		if (!device->bdev || !device->name)
			continue;

		ds = btrfsic_dev_state_alloc();
		if (NULL == ds) {
			printk(KERN_INFO
			       "btrfs check-integrity: kmalloc() failed!\n");
			mutex_unlock(&btrfsic_mutex);
			return -1;
		}
		ds->bdev = device->bdev;
		ds->state = state;
		bdevname(ds->bdev, ds->name);
		ds->name[BDEVNAME_SIZE - 1] = '\0';
		/* reduce the device name to its basename */
		for (p = ds->name; *p != '\0'; p++);
		while (p > ds->name && *p != '/')
			p--;
		if (*p == '/')
			p++;
		strlcpy(ds->name, p, sizeof(ds->name));
		btrfsic_dev_state_hashtable_add(ds,
						&btrfsic_dev_state_hashtable);
	}

	ret = btrfsic_process_superblock(state, fs_devices);
	if (0 != ret) {
		mutex_unlock(&btrfsic_mutex);
		btrfsic_unmount(root, fs_devices);
		return ret;
	}

	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_DATABASE)
		btrfsic_dump_database(state);
	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_TREE)
		btrfsic_dump_tree(state);

	mutex_unlock(&btrfsic_mutex);
	return 0;
}
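/*
 * Tear down the checker state on unmount: drop all dev_state entries,
 * then free every block and link in the database. Blocks whose I/O is
 * still outstanding are reported instead of freed.
 */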
void btrfsic_unmount(struct btrfs_root *root,
		     struct btrfs_fs_devices *fs_devices)
{
	struct list_head *elem_all;
	struct list_head *tmp_all;
	struct btrfsic_state *state;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;

	if (!btrfsic_is_initialized)
		return;

	mutex_lock(&btrfsic_mutex);

	state = NULL;
	list_for_each_entry(device, dev_head, dev_list) {
		struct btrfsic_dev_state *ds;

		if (!device->bdev || !device->name)
			continue;

		ds = btrfsic_dev_state_hashtable_lookup(
				device->bdev,
				&btrfsic_dev_state_hashtable);
		if (NULL != ds) {
			state = ds->state;
			btrfsic_dev_state_hashtable_remove(ds);
			btrfsic_dev_state_free(ds);
		}
	}

	if (NULL == state) {
		printk(KERN_INFO
		       "btrfsic: error, cannot find state information"
		       " on umount!\n");
		mutex_unlock(&btrfsic_mutex);
		return;
	}

	/*
	 * Don't care about keeping the lists' state up to date,
	 * just free all memory that was allocated dynamically.
	 * Free the blocks and the block_links.
	 */
	list_for_each_safe(elem_all, tmp_all, &state->all_blocks_list) {
		struct btrfsic_block *const b_all =
		    list_entry(elem_all, struct btrfsic_block,
			       all_blocks_node);
		struct list_head *elem_ref_to;
		struct list_head *tmp_ref_to;

		list_for_each_safe(elem_ref_to, tmp_ref_to,
				   &b_all->ref_to_list) {
			struct btrfsic_block_link *const l =
			    list_entry(elem_ref_to,
				       struct btrfsic_block_link,
				       node_ref_to);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_rem_link(state, l);

			l->ref_cnt--;
			if (0 == l->ref_cnt)
				btrfsic_block_link_free(l);
		}

		if (b_all->is_iodone || b_all->never_written)
			btrfsic_block_free(b_all);
		else
			printk(KERN_INFO "btrfs: attempt to free %c-block"
			       " @%llu (%s/%llu/%d) on umount which is"
			       " not yet iodone!\n",
			       btrfsic_get_block_type(state, b_all),
			       b_all->logical_bytenr, b_all->dev_state->name,
			       b_all->dev_bytenr, b_all->mirror_num);
	}

	mutex_unlock(&btrfsic_mutex);

	kfree(state);
}