// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete. This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
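 *
 * The work is split into three callbacks: run_one_async_start() does the
 * checksumming, run_one_async_done() maps and submits the bio, and
 * run_one_async_free() frees the async_submit_bio (all defined below).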
 */
struct async_submit_bio {
	void *private_data;
	struct bio *bio;
	extent_submit_bio_start_t *submit_bio_start;
	int mirror_num;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}

/*
 * Compute the csum of a btree block and store the result to provided buffer.
 *
 * Returns error if the extent buffer cannot be mapped.
 */
static int csum_tree_block(struct extent_buffer *buf, u8 *result)
{
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;

	len = buf->len - offset;
	while (len > 0) {
		/*
		 * Note: we don't need to check for the err == 1 case here, as
		 * with the given combination of 'start = BTRFS_CSUM_SIZE (32)'
		 * and 'min_len = 32' and the currently implemented mapping
		 * algorithm we cannot cross a page boundary.
		 */
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (WARN_ON(err))
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	memset(result, 0, BTRFS_CSUM_SIZE);

	btrfs_csum_final(crc, result);

	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
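 *
 * (The parent stores the generation it saw when it last COWed the child;
 * if the block we read back carries a different generation, it is a stale
 * or misplaced copy and the caller should try another mirror.)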
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_read(eb);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		char result[sizeof(crc)];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, sizeof(result)))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
			  csum_type);
		ret = 1;
	}

	return ret;
}

int btrfs_verify_level_key(struct extent_buffer *eb, int level,
			   struct btrfs_key *first_key, u64 parent_transid)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int found_level;
	struct btrfs_key found_key;
	int ret;

	found_level = btrfs_header_level(eb);
	if (found_level != level) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
		     KERN_ERR "BTRFS: tree level check failed\n");
		btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
			  eb->start, level, found_level);
		return -EIO;
	}

	if (!first_key)
		return 0;

	/*
	 * For live tree blocks (new tree blocks in the current transaction),
	 * we need proper lock context to avoid race, which is impossible here.
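	 * (A concurrent writer could be changing the block's slot 0 key under
	 * us while we compare it against @first_key.)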
	 * So we only check tree blocks which are read from disk, whose
	 * generation <= fs_info->last_trans_committed.
	 */
	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
		return 0;
	if (found_level)
		btrfs_node_key_to_cpu(eb, &found_key, 0);
	else
		btrfs_item_key_to_cpu(eb, &found_key, 0);
	ret = btrfs_comp_cpu_keys(first_key, &found_key);

	if (ret) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
		     KERN_ERR "BTRFS: tree first key check failed\n");
		btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
			  eb->start, parent_transid, first_key->objectid,
			  first_key->type, first_key->offset,
			  found_key.objectid, found_key.type,
			  found_key.offset);
	}
	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid:	expected transid, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key of first slot, skip check if NULL
 */
static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
					  u64 parent_transid, int level,
					  struct btrfs_key *first_key)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
		if (!ret) {
			if (verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				ret = -EIO;
			else if (btrfs_verify_level_key(eb, level,
						first_key, parent_transid))
				ret = -EUCLEAN;
			else
				break;
		}

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		btrfs_repair_eb_io_failure(eb, failed_mirror);

	return ret;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	u8 result[BTRFS_CSUM_SIZE];
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	struct extent_buffer *eb;
	int ret;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	if (csum_tree_block(eb, result))
		return -EINVAL;

	if (btrfs_header_level(eb))
		ret = btrfs_check_node(eb);
	else
		ret = btrfs_check_leaf_full(eb);

	if (ret < 0) {
		btrfs_err(fs_info,
		"block=%llu write time tree block corruption detected",
			  eb->start);
		return ret;
	}
	write_extent_buffer(eb, result, 0, csum_size);

	return 0;
}

static int check_tree_block_fsid(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_FSID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		u8 *metadata_uuid;

		/*
		 * Checking the incompat flag is only valid for the current
		 * fs. For seed devices it's forbidden to have their uuid
		 * changed so reading ->fsid in this case is fine
		 */
		if (fs_devices == fs_info->fs_devices &&
		    btrfs_fs_incompat(fs_info, METADATA_UUID))
			metadata_uuid = fs_devices->metadata_uuid;
		else
			metadata_uuid = fs_devices->fsid;

		if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int ret = 0;
	u8 result[BTRFS_CSUM_SIZE];
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
			     eb->start, found_start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d on %llu",
			  (int)btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(eb, result);
	if (ret)
		goto err;

	if (memcmp_extent_buffer(eb, result, 0, csum_size)) {
		u32 val;
		u32 found = 0;

		memcpy(&found, result, csum_size);

		read_extent_buffer(eb, &val, 0, csum_size);
		btrfs_warn_rl(fs_info,
		"%s checksum verify failed on %llu wanted %x found %x level %d",
			      fs_info->sb->s_id, eb->start,
			      val, found, btrfs_header_level(eb));
		ret = -EUCLEAN;
		goto err;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf_full(eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
	else
		btrfs_err(fs_info,
			  "block=%llu read time tree block corruption detected",
			  eb->start);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
				 enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time.  All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 */
static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	struct inode *inode;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	inode = async->private_data;

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
			async->mirror_num, 1);
	if (ret) {
		async->bio->bi_status = ret;
		bio_endio(async->bio);
	}
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_start_t *submit_bio_start)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_offset = bio_offset;

	async->status = 0;

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);
	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int ret = 0;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
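	/*
	 * Every page in this bio belongs to a dirty extent buffer of the
	 * btree inode; csum_dirty_buffer() only fills in the checksum when
	 * handed the first page of a buffer, so multi-page buffers are
	 * summed exactly once.
	 */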
	bio_for_each_segment_all(bvec, bio, iter_all) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
					     u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	return btree_csum_one_bio(bio);
}

static int check_async_write(struct btrfs_inode *bi)
{
	if (atomic_read(&bi->sync_writers))
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
					  int mirror_num,
					  unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(BTRFS_I(inode));
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  0, inode, btree_submit_bio_start);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
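	 * (try_to_release_page() below asks the owner to drop the private
	 * page state; if that fails, the page cannot be migrated.)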
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif


static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH,
					     fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;

	ret = read_extent_buffer_pages(buf, WAIT_NONE, 0);
	if (ret < 0)
		free_extent_buffer_stale(buf);
	else
		free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(buf, WAIT_PAGE_LOCK, mirror_num);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer_stale(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}

/*
 * Read tree block at logical address @bytenr and do variant basic but critical
 * verification.
 *
 * @parent_transid:	expected transid of this tree block, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid, int level,
				      struct btrfs_key *first_key)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(buf, parent_transid,
					     level, first_key);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ERR_PTR(ret);
	}
	return buf;

}

void btrfs_clean_tree_block(struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking_write(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->reloc_dirty_list);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshotted, 0);
	atomic_set(&root->snapshot_force_cow, 0);
	atomic_set(&root->nr_swapfiles, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(fs_info, &root->dirty_log_pages,
				    IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
					   gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     u64 objectid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	int ret = 0;
	uuid_le uuid = NULL_UUID_LE;

	/*
	 * We're holding a transaction handle, so use a NOFS memory allocation
	 * context to avoid deadlock if reclaim happens.
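	 * (memalloc_nofs_save() makes GFP_KERNEL allocations in this scope
	 * behave like GFP_NOFS, so direct reclaim cannot recurse into the
	 * filesystem.)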
	 */
	nofs_flag = memalloc_nofs_save();
	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	root->node = leaf;

	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;
	int level;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation, level, NULL);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					&root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* The caller is responsible to call btrfs_free_fs_root */
	return ret;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
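	/*
	 * Everything else is a fs/subvolume root: try the radix tree cache
	 * first, otherwise read the root from the tree root and insert it.
	 */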
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			btrfs_free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	btrfs_free_fs_root(root);
	return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}

static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int again;

	while (1) {
		again = 0;

		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		btrfs_run_delayed_iputs(fs_info);

		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex.  So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);
sleep:
		clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
		if (kthread_should_park())
			kthread_parkme();
		if (kthread_should_stop())
			return 0;
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
}

static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	time64_t now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * fs_info->commit_interval;
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		now = ktime_get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
		    (now < cur->start_time ||
		     now - cur->start_time < fs_info->commit_interval)) {
			spin_unlock(&fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans);
		} else {
			btrfs_end_transaction(trans);
		}
sleep:
		wake_up_process(fs_info->cleaner_kthread);
		mutex_unlock(&fs_info->transaction_kthread_mutex);

		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
				      &fs_info->fs_state)))
			btrfs_cleanup_transaction(fs_info);
		if (!kthread_should_stop() &&
				(!btrfs_transaction_blocked(fs_info) ||
				 cannot_commit))
			schedule_timeout_interruptible(delay);
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will find the highest generation in the array of
 * root backups.  The index of the highest array is returned,
 * or -1 if we can't find anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
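 *
 * (The BTRFS_NUM_BACKUP_ROOTS slots form a circular buffer, so the entry
 * whose generation matches the super block is the newest one.)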
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}


/*
 * find the oldest backup so we know where to store new entries
 * in the backup array.  This will set the backup_root_index
 * field in the fs_info struct
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
				     u64 newest_gen)
{
	int newest_index = -1;

	newest_index = find_newest_super_backup(info, newest_gen);
	/* if there was garbage in there, just move along */
	if (newest_index == -1) {
		info->backup_root_index = 0;
	} else {
		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
	}
}

/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	int next_backup;
	struct btrfs_root_backup *root_backup;
	int last_backup;

	next_backup = info->backup_root_index;
	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
		BTRFS_NUM_BACKUP_ROOTS;

	/*
	 * just overwrite the last backup if we're at the same generation
	 * this happens only at umount
	 */
	root_backup = info->super_for_commit->super_roots + last_backup;
	if (btrfs_backup_tree_root_gen(root_backup) ==
	    btrfs_header_generation(info->tree_root->node))
		next_backup = last_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			       btrfs_header_generation(info->tree_root->node));

	btrfs_set_backup_tree_root_level(root_backup,
			       btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			       btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			       btrfs_header_level(info->chunk_root->node));

	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
	btrfs_set_backup_extent_root_gen(root_backup,
			       btrfs_header_generation(info->extent_root->node));
	btrfs_set_backup_extent_root_level(root_backup,
			       btrfs_header_level(info->extent_root->node));

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root.  Make sure it is valid before we fill it in.
1929 */ 1930 if (info->fs_root && info->fs_root->node) { 1931 btrfs_set_backup_fs_root(root_backup, 1932 info->fs_root->node->start); 1933 btrfs_set_backup_fs_root_gen(root_backup, 1934 btrfs_header_generation(info->fs_root->node)); 1935 btrfs_set_backup_fs_root_level(root_backup, 1936 btrfs_header_level(info->fs_root->node)); 1937 } 1938 1939 btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); 1940 btrfs_set_backup_dev_root_gen(root_backup, 1941 btrfs_header_generation(info->dev_root->node)); 1942 btrfs_set_backup_dev_root_level(root_backup, 1943 btrfs_header_level(info->dev_root->node)); 1944 1945 btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start); 1946 btrfs_set_backup_csum_root_gen(root_backup, 1947 btrfs_header_generation(info->csum_root->node)); 1948 btrfs_set_backup_csum_root_level(root_backup, 1949 btrfs_header_level(info->csum_root->node)); 1950 1951 btrfs_set_backup_total_bytes(root_backup, 1952 btrfs_super_total_bytes(info->super_copy)); 1953 btrfs_set_backup_bytes_used(root_backup, 1954 btrfs_super_bytes_used(info->super_copy)); 1955 btrfs_set_backup_num_devices(root_backup, 1956 btrfs_super_num_devices(info->super_copy)); 1957 1958 /* 1959 * if we don't copy this out to the super_copy, it won't get remembered 1960 * for the next commit 1961 */ 1962 memcpy(&info->super_copy->super_roots, 1963 &info->super_for_commit->super_roots, 1964 sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); 1965 } 1966 1967 /* 1968 * this copies info out of the root backup array and back into 1969 * the in-memory super block. It is meant to help iterate through 1970 * the array, so you send it the number of backups you've already 1971 * tried and the last backup index you used. 1972 * 1973 * this returns -1 when it has tried all the backups 1974 */ 1975 static noinline int next_root_backup(struct btrfs_fs_info *info, 1976 struct btrfs_super_block *super, 1977 int *num_backups_tried, int *backup_index) 1978 { 1979 struct btrfs_root_backup *root_backup; 1980 int newest = *backup_index; 1981 1982 if (*num_backups_tried == 0) { 1983 u64 gen = btrfs_super_generation(super); 1984 1985 newest = find_newest_super_backup(info, gen); 1986 if (newest == -1) 1987 return -1; 1988 1989 *backup_index = newest; 1990 *num_backups_tried = 1; 1991 } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) { 1992 /* we've tried all the backups, all done */ 1993 return -1; 1994 } else { 1995 /* jump to the next oldest backup */ 1996 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) % 1997 BTRFS_NUM_BACKUP_ROOTS; 1998 *backup_index = newest; 1999 *num_backups_tried += 1; 2000 } 2001 root_backup = super->super_roots + newest; 2002 2003 btrfs_set_super_generation(super, 2004 btrfs_backup_tree_root_gen(root_backup)); 2005 btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); 2006 btrfs_set_super_root_level(super, 2007 btrfs_backup_tree_root_level(root_backup)); 2008 btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); 2009 2010 /* 2011 * fixme: the total bytes and num_devices need to match or we should 2012 * need a fsck 2013 */ 2014 btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); 2015 btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); 2016 return 0; 2017 } 2018 2019 /* helper to cleanup workers */ 2020 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) 2021 { 2022 btrfs_destroy_workqueue(fs_info->fixup_workers); 2023 btrfs_destroy_workqueue(fs_info->delalloc_workers); 2024 
btrfs_destroy_workqueue(fs_info->workers); 2025 btrfs_destroy_workqueue(fs_info->endio_workers); 2026 btrfs_destroy_workqueue(fs_info->endio_raid56_workers); 2027 btrfs_destroy_workqueue(fs_info->endio_repair_workers); 2028 btrfs_destroy_workqueue(fs_info->rmw_workers); 2029 btrfs_destroy_workqueue(fs_info->endio_write_workers); 2030 btrfs_destroy_workqueue(fs_info->endio_freespace_worker); 2031 btrfs_destroy_workqueue(fs_info->submit_workers); 2032 btrfs_destroy_workqueue(fs_info->delayed_workers); 2033 btrfs_destroy_workqueue(fs_info->caching_workers); 2034 btrfs_destroy_workqueue(fs_info->readahead_workers); 2035 btrfs_destroy_workqueue(fs_info->flush_workers); 2036 btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); 2037 btrfs_destroy_workqueue(fs_info->extent_workers); 2038 /* 2039 * Now that all other work queues are destroyed, we can safely destroy 2040 * the queues used for metadata I/O, since tasks from those other work 2041 * queues can do metadata I/O operations. 2042 */ 2043 btrfs_destroy_workqueue(fs_info->endio_meta_workers); 2044 btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); 2045 } 2046 2047 static void free_root_extent_buffers(struct btrfs_root *root) 2048 { 2049 if (root) { 2050 free_extent_buffer(root->node); 2051 free_extent_buffer(root->commit_root); 2052 root->node = NULL; 2053 root->commit_root = NULL; 2054 } 2055 } 2056 2057 /* helper to cleanup tree roots */ 2058 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) 2059 { 2060 free_root_extent_buffers(info->tree_root); 2061 2062 free_root_extent_buffers(info->dev_root); 2063 free_root_extent_buffers(info->extent_root); 2064 free_root_extent_buffers(info->csum_root); 2065 free_root_extent_buffers(info->quota_root); 2066 free_root_extent_buffers(info->uuid_root); 2067 if (chunk_root) 2068 free_root_extent_buffers(info->chunk_root); 2069 free_root_extent_buffers(info->free_space_root); 2070 } 2071 2072 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) 2073 { 2074 int ret; 2075 struct btrfs_root *gang[8]; 2076 int i; 2077 2078 while (!list_empty(&fs_info->dead_roots)) { 2079 gang[0] = list_entry(fs_info->dead_roots.next, 2080 struct btrfs_root, root_list); 2081 list_del(&gang[0]->root_list); 2082 2083 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) { 2084 btrfs_drop_and_free_fs_root(fs_info, gang[0]); 2085 } else { 2086 free_extent_buffer(gang[0]->node); 2087 free_extent_buffer(gang[0]->commit_root); 2088 btrfs_put_fs_root(gang[0]); 2089 } 2090 } 2091 2092 while (1) { 2093 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, 2094 (void **)gang, 0, 2095 ARRAY_SIZE(gang)); 2096 if (!ret) 2097 break; 2098 for (i = 0; i < ret; i++) 2099 btrfs_drop_and_free_fs_root(fs_info, gang[i]); 2100 } 2101 2102 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { 2103 btrfs_free_log_root_tree(NULL, fs_info); 2104 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); 2105 } 2106 } 2107 2108 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) 2109 { 2110 mutex_init(&fs_info->scrub_lock); 2111 atomic_set(&fs_info->scrubs_running, 0); 2112 atomic_set(&fs_info->scrub_pause_req, 0); 2113 atomic_set(&fs_info->scrubs_paused, 0); 2114 atomic_set(&fs_info->scrub_cancel_req, 0); 2115 init_waitqueue_head(&fs_info->scrub_pause_wait); 2116 refcount_set(&fs_info->scrub_workers_refcnt, 0); 2117 } 2118 2119 static void btrfs_init_balance(struct btrfs_fs_info *fs_info) 2120 { 2121 spin_lock_init(&fs_info->balance_lock); 2122 mutex_init(&fs_info->balance_mutex); 2123 
atomic_set(&fs_info->balance_pause_req, 0); 2124 atomic_set(&fs_info->balance_cancel_req, 0); 2125 fs_info->balance_ctl = NULL; 2126 init_waitqueue_head(&fs_info->balance_wait_q); 2127 } 2128 2129 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info) 2130 { 2131 struct inode *inode = fs_info->btree_inode; 2132 2133 inode->i_ino = BTRFS_BTREE_INODE_OBJECTID; 2134 set_nlink(inode, 1); 2135 /* 2136 * we set the i_size on the btree inode to the max possible int. 2137 * the real end of the address space is determined by all of 2138 * the devices in the system 2139 */ 2140 inode->i_size = OFFSET_MAX; 2141 inode->i_mapping->a_ops = &btree_aops; 2142 2143 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); 2144 extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree, 2145 IO_TREE_INODE_IO, inode); 2146 BTRFS_I(inode)->io_tree.track_uptodate = false; 2147 extent_map_tree_init(&BTRFS_I(inode)->extent_tree); 2148 2149 BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops; 2150 2151 BTRFS_I(inode)->root = fs_info->tree_root; 2152 memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key)); 2153 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 2154 btrfs_insert_inode_hash(inode); 2155 } 2156 2157 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info) 2158 { 2159 mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); 2160 init_rwsem(&fs_info->dev_replace.rwsem); 2161 init_waitqueue_head(&fs_info->dev_replace.replace_wait); 2162 } 2163 2164 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info) 2165 { 2166 spin_lock_init(&fs_info->qgroup_lock); 2167 mutex_init(&fs_info->qgroup_ioctl_lock); 2168 fs_info->qgroup_tree = RB_ROOT; 2169 INIT_LIST_HEAD(&fs_info->dirty_qgroups); 2170 fs_info->qgroup_seq = 1; 2171 fs_info->qgroup_ulist = NULL; 2172 fs_info->qgroup_rescan_running = false; 2173 mutex_init(&fs_info->qgroup_rescan_lock); 2174 } 2175 2176 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, 2177 struct btrfs_fs_devices *fs_devices) 2178 { 2179 u32 max_active = fs_info->thread_pool_size; 2180 unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND; 2181 2182 fs_info->workers = 2183 btrfs_alloc_workqueue(fs_info, "worker", 2184 flags | WQ_HIGHPRI, max_active, 16); 2185 2186 fs_info->delalloc_workers = 2187 btrfs_alloc_workqueue(fs_info, "delalloc", 2188 flags, max_active, 2); 2189 2190 fs_info->flush_workers = 2191 btrfs_alloc_workqueue(fs_info, "flush_delalloc", 2192 flags, max_active, 0); 2193 2194 fs_info->caching_workers = 2195 btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0); 2196 2197 /* 2198 * a higher idle thresh on the submit workers makes it much more 2199 * likely that bios will be send down in a sane order to the 2200 * devices 2201 */ 2202 fs_info->submit_workers = 2203 btrfs_alloc_workqueue(fs_info, "submit", flags, 2204 min_t(u64, fs_devices->num_devices, 2205 max_active), 64); 2206 2207 fs_info->fixup_workers = 2208 btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0); 2209 2210 /* 2211 * endios are largely parallel and should have a very 2212 * low idle thresh 2213 */ 2214 fs_info->endio_workers = 2215 btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4); 2216 fs_info->endio_meta_workers = 2217 btrfs_alloc_workqueue(fs_info, "endio-meta", flags, 2218 max_active, 4); 2219 fs_info->endio_meta_write_workers = 2220 btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags, 2221 max_active, 2); 2222 fs_info->endio_raid56_workers = 2223 btrfs_alloc_workqueue(fs_info, "endio-raid56", flags, 2224 max_active, 4); 
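	/*
	 * All of the queues in this function come from the same call shape
	 * (a sketch with placeholder values, not a real allocation):
	 *
	 *	wq = btrfs_alloc_workqueue(fs_info, "name", flags,
	 *				   max_active, thresh);
	 *
	 * The trailing thresh argument is the idle threshold the comments
	 * above and below refer to; it lets a queue grow or shrink its
	 * effective max_active, so the highly parallel endio queues pass a
	 * low value while batching-friendly queues pass a higher one.
	 */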
2225 fs_info->endio_repair_workers = 2226 btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0); 2227 fs_info->rmw_workers = 2228 btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2); 2229 fs_info->endio_write_workers = 2230 btrfs_alloc_workqueue(fs_info, "endio-write", flags, 2231 max_active, 2); 2232 fs_info->endio_freespace_worker = 2233 btrfs_alloc_workqueue(fs_info, "freespace-write", flags, 2234 max_active, 0); 2235 fs_info->delayed_workers = 2236 btrfs_alloc_workqueue(fs_info, "delayed-meta", flags, 2237 max_active, 0); 2238 fs_info->readahead_workers = 2239 btrfs_alloc_workqueue(fs_info, "readahead", flags, 2240 max_active, 2); 2241 fs_info->qgroup_rescan_workers = 2242 btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0); 2243 fs_info->extent_workers = 2244 btrfs_alloc_workqueue(fs_info, "extent-refs", flags, 2245 min_t(u64, fs_devices->num_devices, 2246 max_active), 8); 2247 2248 if (!(fs_info->workers && fs_info->delalloc_workers && 2249 fs_info->submit_workers && fs_info->flush_workers && 2250 fs_info->endio_workers && fs_info->endio_meta_workers && 2251 fs_info->endio_meta_write_workers && 2252 fs_info->endio_repair_workers && 2253 fs_info->endio_write_workers && fs_info->endio_raid56_workers && 2254 fs_info->endio_freespace_worker && fs_info->rmw_workers && 2255 fs_info->caching_workers && fs_info->readahead_workers && 2256 fs_info->fixup_workers && fs_info->delayed_workers && 2257 fs_info->extent_workers && 2258 fs_info->qgroup_rescan_workers)) { 2259 return -ENOMEM; 2260 } 2261 2262 return 0; 2263 } 2264 2265 static int btrfs_replay_log(struct btrfs_fs_info *fs_info, 2266 struct btrfs_fs_devices *fs_devices) 2267 { 2268 int ret; 2269 struct btrfs_root *log_tree_root; 2270 struct btrfs_super_block *disk_super = fs_info->super_copy; 2271 u64 bytenr = btrfs_super_log_root(disk_super); 2272 int level = btrfs_super_log_root_level(disk_super); 2273 2274 if (fs_devices->rw_devices == 0) { 2275 btrfs_warn(fs_info, "log replay required on RO media"); 2276 return -EIO; 2277 } 2278 2279 log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); 2280 if (!log_tree_root) 2281 return -ENOMEM; 2282 2283 __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID); 2284 2285 log_tree_root->node = read_tree_block(fs_info, bytenr, 2286 fs_info->generation + 1, 2287 level, NULL); 2288 if (IS_ERR(log_tree_root->node)) { 2289 btrfs_warn(fs_info, "failed to read log tree"); 2290 ret = PTR_ERR(log_tree_root->node); 2291 kfree(log_tree_root); 2292 return ret; 2293 } else if (!extent_buffer_uptodate(log_tree_root->node)) { 2294 btrfs_err(fs_info, "failed to read log tree"); 2295 free_extent_buffer(log_tree_root->node); 2296 kfree(log_tree_root); 2297 return -EIO; 2298 } 2299 /* returns with log_tree_root freed on success */ 2300 ret = btrfs_recover_log_trees(log_tree_root); 2301 if (ret) { 2302 btrfs_handle_fs_error(fs_info, ret, 2303 "Failed to recover log tree"); 2304 free_extent_buffer(log_tree_root->node); 2305 kfree(log_tree_root); 2306 return ret; 2307 } 2308 2309 if (sb_rdonly(fs_info->sb)) { 2310 ret = btrfs_commit_super(fs_info); 2311 if (ret) 2312 return ret; 2313 } 2314 2315 return 0; 2316 } 2317 2318 static int btrfs_read_roots(struct btrfs_fs_info *fs_info) 2319 { 2320 struct btrfs_root *tree_root = fs_info->tree_root; 2321 struct btrfs_root *root; 2322 struct btrfs_key location; 2323 int ret; 2324 2325 BUG_ON(!fs_info->tree_root); 2326 2327 location.objectid = BTRFS_EXTENT_TREE_OBJECTID; 2328 location.type = BTRFS_ROOT_ITEM_KEY; 2329 location.offset = 0; 2330 2331 
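	/*
	 * Each tree below is looked up the same way: point the key at
	 * (objectid, BTRFS_ROOT_ITEM_KEY, offset 0), read the root from the
	 * tree of tree roots, and set BTRFS_ROOT_TRACK_DIRTY on the result.
	 * Only location.objectid changes between the lookups that follow;
	 * the quota and UUID trees are optional and a missing item is
	 * tolerated for them.
	 */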
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->extent_root = root;

	location.objectid = BTRFS_DEV_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->dev_root = root;
	btrfs_init_devices_late(fs_info);

	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->csum_root = root;

	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (!IS_ERR(root)) {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		fs_info->quota_root = root;
	}

	location.objectid = BTRFS_UUID_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		if (ret != -ENOENT)
			goto out;
	} else {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->uuid_root = root;
	}

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
		root = btrfs_read_tree_root(tree_root, &location);
		if (IS_ERR(root)) {
			ret = PTR_ERR(root);
			goto out;
		}
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->free_space_root = root;
	}

	return 0;
out:
	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
		   location.objectid, ret);
	return ret;
}

/*
 * Real super block validation
 * NOTE: super csum type and incompat features will not be checked here.
 *
 * @sb:		super block to check
 * @mirror_num:	the super block mirror whose bytenr is checked:
 *		0	the primary (1st) sb
 *		1, 2	2nd and 3rd backup copy
 *		-1	skip bytenr check
 */
static int validate_super(struct btrfs_fs_info *fs_info,
			  struct btrfs_super_block *sb, int mirror_num)
{
	u64 nodesize = btrfs_super_nodesize(sb);
	u64 sectorsize = btrfs_super_sectorsize(sb);
	int ret = 0;

	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
		btrfs_err(fs_info, "no valid FS found");
		ret = -EINVAL;
	}
	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
			  btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
		ret = -EINVAL;
	}
	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
			  btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
			  btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "log_root level too big: %d >= %d",
			  btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}

	/*
	 * Check sectorsize and nodesize first; the other checks will need
	 * them. All possible sectorsizes (4K, 8K, 16K, 32K, 64K) are
	 * checked here.
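	 * (That is every power of two accepted by the range check below,
	 * from 4K up to BTRFS_MAX_METADATA_BLOCKSIZE; the stricter
	 * sectorsize == PAGE_SIZE restriction is applied separately.)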
	 */
	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
		ret = -EINVAL;
	}
	/* Only sectorsize == PAGE_SIZE is supported for now */
	if (sectorsize != PAGE_SIZE) {
		btrfs_err(fs_info,
			"sectorsize %llu not supported yet, only support %lu",
			sectorsize, PAGE_SIZE);
		ret = -EINVAL;
	}
	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
		ret = -EINVAL;
	}
	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
			  le32_to_cpu(sb->__unused_leafsize), nodesize);
		ret = -EINVAL;
	}

	/* Root alignment check */
	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
			   btrfs_super_root(sb));
		ret = -EINVAL;
	}
	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
			   btrfs_super_chunk_root(sb));
		ret = -EINVAL;
	}
	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "log_root block unaligned: %llu",
			   btrfs_super_log_root(sb));
		ret = -EINVAL;
	}

	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
		   BTRFS_FSID_SIZE) != 0) {
		btrfs_err(fs_info,
			"dev_item UUID does not match metadata fsid: %pU != %pU",
			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
		ret = -EINVAL;
	}

	/*
	 * Hint to catch really bogus numbers such as bitflips; more exact
	 * checks are done later.
	 */
	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
		btrfs_err(fs_info, "bytes_used is too small %llu",
			  btrfs_super_bytes_used(sb));
		ret = -EINVAL;
	}
	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
		btrfs_err(fs_info, "invalid stripesize %u",
			  btrfs_super_stripesize(sb));
		ret = -EINVAL;
	}
	if (btrfs_super_num_devices(sb) > (1UL << 31))
		btrfs_warn(fs_info, "suspicious number of devices: %llu",
			   btrfs_super_num_devices(sb));
	if (btrfs_super_num_devices(sb) == 0) {
		btrfs_err(fs_info, "number of devices is 0");
		ret = -EINVAL;
	}

	if (mirror_num >= 0 &&
	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
		btrfs_err(fs_info, "super offset mismatch %llu != %u",
			  btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
		ret = -EINVAL;
	}

	/*
	 * Obvious sys_chunk_array corruptions: it must hold at least one
	 * key and one chunk.
	 */
	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		btrfs_err(fs_info, "system chunk array too big %u > %u",
			  btrfs_super_sys_array_size(sb),
			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
		ret = -EINVAL;
	}
	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
			+ sizeof(struct btrfs_chunk)) {
		btrfs_err(fs_info, "system chunk array too small %u < %zu",
			  btrfs_super_sys_array_size(sb),
			  sizeof(struct btrfs_disk_key)
			  + sizeof(struct btrfs_chunk));
		ret = -EINVAL;
	}

	/*
	 * The generation is a global counter; we'll trust it more than the
	 * others, but it's still possible that it's the one that's wrong.
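	 *
	 * That is why the two generation comparisons that follow only
	 * btrfs_warn() and leave ret untouched, unlike the hard -EINVAL
	 * failures above.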
2540 */ 2541 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb)) 2542 btrfs_warn(fs_info, 2543 "suspicious: generation < chunk_root_generation: %llu < %llu", 2544 btrfs_super_generation(sb), 2545 btrfs_super_chunk_root_generation(sb)); 2546 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb) 2547 && btrfs_super_cache_generation(sb) != (u64)-1) 2548 btrfs_warn(fs_info, 2549 "suspicious: generation < cache_generation: %llu < %llu", 2550 btrfs_super_generation(sb), 2551 btrfs_super_cache_generation(sb)); 2552 2553 return ret; 2554 } 2555 2556 /* 2557 * Validation of super block at mount time. 2558 * Some checks already done early at mount time, like csum type and incompat 2559 * flags will be skipped. 2560 */ 2561 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info) 2562 { 2563 return validate_super(fs_info, fs_info->super_copy, 0); 2564 } 2565 2566 /* 2567 * Validation of super block at write time. 2568 * Some checks like bytenr check will be skipped as their values will be 2569 * overwritten soon. 2570 * Extra checks like csum type and incompat flags will be done here. 2571 */ 2572 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info, 2573 struct btrfs_super_block *sb) 2574 { 2575 int ret; 2576 2577 ret = validate_super(fs_info, sb, -1); 2578 if (ret < 0) 2579 goto out; 2580 if (btrfs_super_csum_type(sb) != BTRFS_CSUM_TYPE_CRC32) { 2581 ret = -EUCLEAN; 2582 btrfs_err(fs_info, "invalid csum type, has %u want %u", 2583 btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32); 2584 goto out; 2585 } 2586 if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) { 2587 ret = -EUCLEAN; 2588 btrfs_err(fs_info, 2589 "invalid incompat flags, has 0x%llx valid mask 0x%llx", 2590 btrfs_super_incompat_flags(sb), 2591 (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP); 2592 goto out; 2593 } 2594 out: 2595 if (ret < 0) 2596 btrfs_err(fs_info, 2597 "super block corruption detected before writing it to disk"); 2598 return ret; 2599 } 2600 2601 int open_ctree(struct super_block *sb, 2602 struct btrfs_fs_devices *fs_devices, 2603 char *options) 2604 { 2605 u32 sectorsize; 2606 u32 nodesize; 2607 u32 stripesize; 2608 u64 generation; 2609 u64 features; 2610 struct btrfs_key location; 2611 struct buffer_head *bh; 2612 struct btrfs_super_block *disk_super; 2613 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 2614 struct btrfs_root *tree_root; 2615 struct btrfs_root *chunk_root; 2616 int ret; 2617 int err = -EINVAL; 2618 int num_backups_tried = 0; 2619 int backup_index = 0; 2620 int clear_free_space_tree = 0; 2621 int level; 2622 2623 tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); 2624 chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL); 2625 if (!tree_root || !chunk_root) { 2626 err = -ENOMEM; 2627 goto fail; 2628 } 2629 2630 ret = init_srcu_struct(&fs_info->subvol_srcu); 2631 if (ret) { 2632 err = ret; 2633 goto fail; 2634 } 2635 2636 ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL); 2637 if (ret) { 2638 err = ret; 2639 goto fail_srcu; 2640 } 2641 2642 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); 2643 if (ret) { 2644 err = ret; 2645 goto fail_dio_bytes; 2646 } 2647 fs_info->dirty_metadata_batch = PAGE_SIZE * 2648 (1 + ilog2(nr_cpu_ids)); 2649 2650 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); 2651 if (ret) { 2652 err = ret; 2653 goto fail_dirty_metadata_bytes; 2654 } 2655 2656 ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0, 
2657 GFP_KERNEL); 2658 if (ret) { 2659 err = ret; 2660 goto fail_delalloc_bytes; 2661 } 2662 2663 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); 2664 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); 2665 INIT_LIST_HEAD(&fs_info->trans_list); 2666 INIT_LIST_HEAD(&fs_info->dead_roots); 2667 INIT_LIST_HEAD(&fs_info->delayed_iputs); 2668 INIT_LIST_HEAD(&fs_info->delalloc_roots); 2669 INIT_LIST_HEAD(&fs_info->caching_block_groups); 2670 INIT_LIST_HEAD(&fs_info->pending_raid_kobjs); 2671 spin_lock_init(&fs_info->pending_raid_kobjs_lock); 2672 spin_lock_init(&fs_info->delalloc_root_lock); 2673 spin_lock_init(&fs_info->trans_lock); 2674 spin_lock_init(&fs_info->fs_roots_radix_lock); 2675 spin_lock_init(&fs_info->delayed_iput_lock); 2676 spin_lock_init(&fs_info->defrag_inodes_lock); 2677 spin_lock_init(&fs_info->tree_mod_seq_lock); 2678 spin_lock_init(&fs_info->super_lock); 2679 spin_lock_init(&fs_info->buffer_lock); 2680 spin_lock_init(&fs_info->unused_bgs_lock); 2681 rwlock_init(&fs_info->tree_mod_log_lock); 2682 mutex_init(&fs_info->unused_bg_unpin_mutex); 2683 mutex_init(&fs_info->delete_unused_bgs_mutex); 2684 mutex_init(&fs_info->reloc_mutex); 2685 mutex_init(&fs_info->delalloc_root_mutex); 2686 seqlock_init(&fs_info->profiles_lock); 2687 2688 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); 2689 INIT_LIST_HEAD(&fs_info->space_info); 2690 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); 2691 INIT_LIST_HEAD(&fs_info->unused_bgs); 2692 btrfs_mapping_init(&fs_info->mapping_tree); 2693 btrfs_init_block_rsv(&fs_info->global_block_rsv, 2694 BTRFS_BLOCK_RSV_GLOBAL); 2695 btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); 2696 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); 2697 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); 2698 btrfs_init_block_rsv(&fs_info->delayed_block_rsv, 2699 BTRFS_BLOCK_RSV_DELOPS); 2700 btrfs_init_block_rsv(&fs_info->delayed_refs_rsv, 2701 BTRFS_BLOCK_RSV_DELREFS); 2702 2703 atomic_set(&fs_info->async_delalloc_pages, 0); 2704 atomic_set(&fs_info->defrag_running, 0); 2705 atomic_set(&fs_info->reada_works_cnt, 0); 2706 atomic_set(&fs_info->nr_delayed_iputs, 0); 2707 atomic64_set(&fs_info->tree_mod_seq, 0); 2708 fs_info->sb = sb; 2709 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; 2710 fs_info->metadata_ratio = 0; 2711 fs_info->defrag_inodes = RB_ROOT; 2712 atomic64_set(&fs_info->free_chunk_space, 0); 2713 fs_info->tree_mod_log = RB_ROOT; 2714 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; 2715 fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */ 2716 /* readahead state */ 2717 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 2718 spin_lock_init(&fs_info->reada_lock); 2719 btrfs_init_ref_verify(fs_info); 2720 2721 fs_info->thread_pool_size = min_t(unsigned long, 2722 num_online_cpus() + 2, 8); 2723 2724 INIT_LIST_HEAD(&fs_info->ordered_roots); 2725 spin_lock_init(&fs_info->ordered_root_lock); 2726 2727 fs_info->btree_inode = new_inode(sb); 2728 if (!fs_info->btree_inode) { 2729 err = -ENOMEM; 2730 goto fail_bio_counter; 2731 } 2732 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); 2733 2734 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), 2735 GFP_KERNEL); 2736 if (!fs_info->delayed_root) { 2737 err = -ENOMEM; 2738 goto fail_iput; 2739 } 2740 btrfs_init_delayed_root(fs_info->delayed_root); 2741 2742 btrfs_init_scrub(fs_info); 2743 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 2744 fs_info->check_integrity_print_mask = 0; 
2745 #endif 2746 btrfs_init_balance(fs_info); 2747 btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work); 2748 2749 sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; 2750 sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); 2751 2752 btrfs_init_btree_inode(fs_info); 2753 2754 spin_lock_init(&fs_info->block_group_cache_lock); 2755 fs_info->block_group_cache_tree = RB_ROOT; 2756 fs_info->first_logical_byte = (u64)-1; 2757 2758 extent_io_tree_init(fs_info, &fs_info->freed_extents[0], 2759 IO_TREE_FS_INFO_FREED_EXTENTS0, NULL); 2760 extent_io_tree_init(fs_info, &fs_info->freed_extents[1], 2761 IO_TREE_FS_INFO_FREED_EXTENTS1, NULL); 2762 fs_info->pinned_extents = &fs_info->freed_extents[0]; 2763 set_bit(BTRFS_FS_BARRIER, &fs_info->flags); 2764 2765 mutex_init(&fs_info->ordered_operations_mutex); 2766 mutex_init(&fs_info->tree_log_mutex); 2767 mutex_init(&fs_info->chunk_mutex); 2768 mutex_init(&fs_info->transaction_kthread_mutex); 2769 mutex_init(&fs_info->cleaner_mutex); 2770 mutex_init(&fs_info->ro_block_group_mutex); 2771 init_rwsem(&fs_info->commit_root_sem); 2772 init_rwsem(&fs_info->cleanup_work_sem); 2773 init_rwsem(&fs_info->subvol_sem); 2774 sema_init(&fs_info->uuid_tree_rescan_sem, 1); 2775 2776 btrfs_init_dev_replace_locks(fs_info); 2777 btrfs_init_qgroup(fs_info); 2778 2779 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); 2780 btrfs_init_free_cluster(&fs_info->data_alloc_cluster); 2781 2782 init_waitqueue_head(&fs_info->transaction_throttle); 2783 init_waitqueue_head(&fs_info->transaction_wait); 2784 init_waitqueue_head(&fs_info->transaction_blocked_wait); 2785 init_waitqueue_head(&fs_info->async_submit_wait); 2786 init_waitqueue_head(&fs_info->delayed_iputs_wait); 2787 2788 /* Usable values until the real ones are cached from the superblock */ 2789 fs_info->nodesize = 4096; 2790 fs_info->sectorsize = 4096; 2791 fs_info->stripesize = 4096; 2792 2793 spin_lock_init(&fs_info->swapfile_pins_lock); 2794 fs_info->swapfile_pins = RB_ROOT; 2795 2796 ret = btrfs_alloc_stripe_hash_table(fs_info); 2797 if (ret) { 2798 err = ret; 2799 goto fail_alloc; 2800 } 2801 2802 __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); 2803 2804 invalidate_bdev(fs_devices->latest_bdev); 2805 2806 /* 2807 * Read super block and check the signature bytes only 2808 */ 2809 bh = btrfs_read_dev_super(fs_devices->latest_bdev); 2810 if (IS_ERR(bh)) { 2811 err = PTR_ERR(bh); 2812 goto fail_alloc; 2813 } 2814 2815 /* 2816 * We want to check superblock checksum, the type is stored inside. 2817 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). 
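	 * The csum field occupies the first BTRFS_CSUM_SIZE bytes of that
	 * block and the checksum covers everything after it, matching how
	 * write_dev_supers() computes it before each super block write.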
	 */
	if (btrfs_check_super_csum(fs_info, bh->b_data)) {
		btrfs_err(fs_info, "superblock checksum mismatch");
		err = -EINVAL;
		brelse(bh);
		goto fail_alloc;
	}

	/*
	 * super_copy is zeroed at allocation time and we never touch the
	 * following bytes up to INFO_SIZE; the checksum is calculated from
	 * the whole block of INFO_SIZE.
	 */
	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
	brelse(bh);

	disk_super = fs_info->super_copy;

	ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
		       BTRFS_FSID_SIZE));

	if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
		ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
			       fs_info->super_copy->metadata_uuid,
			       BTRFS_FSID_SIZE));
	}

	features = btrfs_super_flags(disk_super);
	if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
		features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
		btrfs_set_super_flags(disk_super, features);
		btrfs_info(fs_info,
			"found metadata UUID change in progress flag, clearing");
	}

	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_for_commit));

	ret = btrfs_validate_mount_super(fs_info);
	if (ret) {
		btrfs_err(fs_info, "superblock contains fatal errors");
		err = -EINVAL;
		goto fail_alloc;
	}

	if (!btrfs_super_root(disk_super))
		goto fail_alloc;

	/* Check the FS state to see whether the FS is broken. */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);

	/*
	 * Run through our array of backup supers and set up our ring
	 * pointer to the oldest one.
	 */
	generation = btrfs_super_generation(disk_super);
	find_oldest_super_backup(fs_info, generation);

	/*
	 * In the long term, we'll store the compression type in the super
	 * block, and it'll be used for per file compression control.
2880 */ 2881 fs_info->compress_type = BTRFS_COMPRESS_ZLIB; 2882 2883 ret = btrfs_parse_options(fs_info, options, sb->s_flags); 2884 if (ret) { 2885 err = ret; 2886 goto fail_alloc; 2887 } 2888 2889 features = btrfs_super_incompat_flags(disk_super) & 2890 ~BTRFS_FEATURE_INCOMPAT_SUPP; 2891 if (features) { 2892 btrfs_err(fs_info, 2893 "cannot mount because of unsupported optional features (%llx)", 2894 features); 2895 err = -EINVAL; 2896 goto fail_alloc; 2897 } 2898 2899 features = btrfs_super_incompat_flags(disk_super); 2900 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; 2901 if (fs_info->compress_type == BTRFS_COMPRESS_LZO) 2902 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; 2903 else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD) 2904 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD; 2905 2906 if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) 2907 btrfs_info(fs_info, "has skinny extents"); 2908 2909 /* 2910 * flag our filesystem as having big metadata blocks if 2911 * they are bigger than the page size 2912 */ 2913 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { 2914 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) 2915 btrfs_info(fs_info, 2916 "flagging fs with big metadata feature"); 2917 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; 2918 } 2919 2920 nodesize = btrfs_super_nodesize(disk_super); 2921 sectorsize = btrfs_super_sectorsize(disk_super); 2922 stripesize = sectorsize; 2923 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); 2924 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); 2925 2926 /* Cache block sizes */ 2927 fs_info->nodesize = nodesize; 2928 fs_info->sectorsize = sectorsize; 2929 fs_info->stripesize = stripesize; 2930 2931 /* 2932 * mixed block groups end up with duplicate but slightly offset 2933 * extent buffers for the same range. It leads to corruptions 2934 */ 2935 if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && 2936 (sectorsize != nodesize)) { 2937 btrfs_err(fs_info, 2938 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups", 2939 nodesize, sectorsize); 2940 goto fail_alloc; 2941 } 2942 2943 /* 2944 * Needn't use the lock because there is no other task which will 2945 * update the flag. 
2946 */ 2947 btrfs_set_super_incompat_flags(disk_super, features); 2948 2949 features = btrfs_super_compat_ro_flags(disk_super) & 2950 ~BTRFS_FEATURE_COMPAT_RO_SUPP; 2951 if (!sb_rdonly(sb) && features) { 2952 btrfs_err(fs_info, 2953 "cannot mount read-write because of unsupported optional features (%llx)", 2954 features); 2955 err = -EINVAL; 2956 goto fail_alloc; 2957 } 2958 2959 ret = btrfs_init_workqueues(fs_info, fs_devices); 2960 if (ret) { 2961 err = ret; 2962 goto fail_sb_buffer; 2963 } 2964 2965 sb->s_bdi->congested_fn = btrfs_congested_fn; 2966 sb->s_bdi->congested_data = fs_info; 2967 sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; 2968 sb->s_bdi->ra_pages = VM_READAHEAD_PAGES; 2969 sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); 2970 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); 2971 2972 sb->s_blocksize = sectorsize; 2973 sb->s_blocksize_bits = blksize_bits(sectorsize); 2974 memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE); 2975 2976 mutex_lock(&fs_info->chunk_mutex); 2977 ret = btrfs_read_sys_array(fs_info); 2978 mutex_unlock(&fs_info->chunk_mutex); 2979 if (ret) { 2980 btrfs_err(fs_info, "failed to read the system array: %d", ret); 2981 goto fail_sb_buffer; 2982 } 2983 2984 generation = btrfs_super_chunk_root_generation(disk_super); 2985 level = btrfs_super_chunk_root_level(disk_super); 2986 2987 __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); 2988 2989 chunk_root->node = read_tree_block(fs_info, 2990 btrfs_super_chunk_root(disk_super), 2991 generation, level, NULL); 2992 if (IS_ERR(chunk_root->node) || 2993 !extent_buffer_uptodate(chunk_root->node)) { 2994 btrfs_err(fs_info, "failed to read chunk root"); 2995 if (!IS_ERR(chunk_root->node)) 2996 free_extent_buffer(chunk_root->node); 2997 chunk_root->node = NULL; 2998 goto fail_tree_roots; 2999 } 3000 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); 3001 chunk_root->commit_root = btrfs_root_node(chunk_root); 3002 3003 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, 3004 btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); 3005 3006 ret = btrfs_read_chunk_tree(fs_info); 3007 if (ret) { 3008 btrfs_err(fs_info, "failed to read chunk tree: %d", ret); 3009 goto fail_tree_roots; 3010 } 3011 3012 /* 3013 * Keep the devid that is marked to be the target device for the 3014 * device replace procedure 3015 */ 3016 btrfs_free_extra_devids(fs_devices, 0); 3017 3018 if (!fs_devices->latest_bdev) { 3019 btrfs_err(fs_info, "failed to read devices"); 3020 goto fail_tree_roots; 3021 } 3022 3023 retry_root_backup: 3024 generation = btrfs_super_generation(disk_super); 3025 level = btrfs_super_root_level(disk_super); 3026 3027 tree_root->node = read_tree_block(fs_info, 3028 btrfs_super_root(disk_super), 3029 generation, level, NULL); 3030 if (IS_ERR(tree_root->node) || 3031 !extent_buffer_uptodate(tree_root->node)) { 3032 btrfs_warn(fs_info, "failed to read tree root"); 3033 if (!IS_ERR(tree_root->node)) 3034 free_extent_buffer(tree_root->node); 3035 tree_root->node = NULL; 3036 goto recovery_tree_root; 3037 } 3038 3039 btrfs_set_root_node(&tree_root->root_item, tree_root->node); 3040 tree_root->commit_root = btrfs_root_node(tree_root); 3041 btrfs_set_root_refs(&tree_root->root_item, 1); 3042 3043 mutex_lock(&tree_root->objectid_mutex); 3044 ret = btrfs_find_highest_objectid(tree_root, 3045 &tree_root->highest_objectid); 3046 if (ret) { 3047 mutex_unlock(&tree_root->objectid_mutex); 3048 goto recovery_tree_root; 3049 } 3050 3051 
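	/*
	 * highest_objectid seeds allocation of new root objectids (new
	 * object ids are handed out above it), so it must not exceed
	 * BTRFS_LAST_FREE_OBJECTID, the top of the allocatable range;
	 * hence the assertion below.
	 */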
ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); 3052 3053 mutex_unlock(&tree_root->objectid_mutex); 3054 3055 ret = btrfs_read_roots(fs_info); 3056 if (ret) 3057 goto recovery_tree_root; 3058 3059 fs_info->generation = generation; 3060 fs_info->last_trans_committed = generation; 3061 3062 ret = btrfs_verify_dev_extents(fs_info); 3063 if (ret) { 3064 btrfs_err(fs_info, 3065 "failed to verify dev extents against chunks: %d", 3066 ret); 3067 goto fail_block_groups; 3068 } 3069 ret = btrfs_recover_balance(fs_info); 3070 if (ret) { 3071 btrfs_err(fs_info, "failed to recover balance: %d", ret); 3072 goto fail_block_groups; 3073 } 3074 3075 ret = btrfs_init_dev_stats(fs_info); 3076 if (ret) { 3077 btrfs_err(fs_info, "failed to init dev_stats: %d", ret); 3078 goto fail_block_groups; 3079 } 3080 3081 ret = btrfs_init_dev_replace(fs_info); 3082 if (ret) { 3083 btrfs_err(fs_info, "failed to init dev_replace: %d", ret); 3084 goto fail_block_groups; 3085 } 3086 3087 btrfs_free_extra_devids(fs_devices, 1); 3088 3089 ret = btrfs_sysfs_add_fsid(fs_devices, NULL); 3090 if (ret) { 3091 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d", 3092 ret); 3093 goto fail_block_groups; 3094 } 3095 3096 ret = btrfs_sysfs_add_device(fs_devices); 3097 if (ret) { 3098 btrfs_err(fs_info, "failed to init sysfs device interface: %d", 3099 ret); 3100 goto fail_fsdev_sysfs; 3101 } 3102 3103 ret = btrfs_sysfs_add_mounted(fs_info); 3104 if (ret) { 3105 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret); 3106 goto fail_fsdev_sysfs; 3107 } 3108 3109 ret = btrfs_init_space_info(fs_info); 3110 if (ret) { 3111 btrfs_err(fs_info, "failed to initialize space info: %d", ret); 3112 goto fail_sysfs; 3113 } 3114 3115 ret = btrfs_read_block_groups(fs_info); 3116 if (ret) { 3117 btrfs_err(fs_info, "failed to read block groups: %d", ret); 3118 goto fail_sysfs; 3119 } 3120 3121 if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) { 3122 btrfs_warn(fs_info, 3123 "writable mount is not allowed due to too many missing devices"); 3124 goto fail_sysfs; 3125 } 3126 3127 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, 3128 "btrfs-cleaner"); 3129 if (IS_ERR(fs_info->cleaner_kthread)) 3130 goto fail_sysfs; 3131 3132 fs_info->transaction_kthread = kthread_run(transaction_kthread, 3133 tree_root, 3134 "btrfs-transaction"); 3135 if (IS_ERR(fs_info->transaction_kthread)) 3136 goto fail_cleaner; 3137 3138 if (!btrfs_test_opt(fs_info, NOSSD) && 3139 !fs_info->fs_devices->rotating) { 3140 btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations"); 3141 } 3142 3143 /* 3144 * Mount does not set all options immediately, we can do it now and do 3145 * not have to wait for transaction commit 3146 */ 3147 btrfs_apply_pending_changes(fs_info); 3148 3149 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 3150 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) { 3151 ret = btrfsic_mount(fs_info, fs_devices, 3152 btrfs_test_opt(fs_info, 3153 CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? 
3154 1 : 0, 3155 fs_info->check_integrity_print_mask); 3156 if (ret) 3157 btrfs_warn(fs_info, 3158 "failed to initialize integrity check module: %d", 3159 ret); 3160 } 3161 #endif 3162 ret = btrfs_read_qgroup_config(fs_info); 3163 if (ret) 3164 goto fail_trans_kthread; 3165 3166 if (btrfs_build_ref_tree(fs_info)) 3167 btrfs_err(fs_info, "couldn't build ref tree"); 3168 3169 /* do not make disk changes in broken FS or nologreplay is given */ 3170 if (btrfs_super_log_root(disk_super) != 0 && 3171 !btrfs_test_opt(fs_info, NOLOGREPLAY)) { 3172 ret = btrfs_replay_log(fs_info, fs_devices); 3173 if (ret) { 3174 err = ret; 3175 goto fail_qgroup; 3176 } 3177 } 3178 3179 ret = btrfs_find_orphan_roots(fs_info); 3180 if (ret) 3181 goto fail_qgroup; 3182 3183 if (!sb_rdonly(sb)) { 3184 ret = btrfs_cleanup_fs_roots(fs_info); 3185 if (ret) 3186 goto fail_qgroup; 3187 3188 mutex_lock(&fs_info->cleaner_mutex); 3189 ret = btrfs_recover_relocation(tree_root); 3190 mutex_unlock(&fs_info->cleaner_mutex); 3191 if (ret < 0) { 3192 btrfs_warn(fs_info, "failed to recover relocation: %d", 3193 ret); 3194 err = -EINVAL; 3195 goto fail_qgroup; 3196 } 3197 } 3198 3199 location.objectid = BTRFS_FS_TREE_OBJECTID; 3200 location.type = BTRFS_ROOT_ITEM_KEY; 3201 location.offset = 0; 3202 3203 fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); 3204 if (IS_ERR(fs_info->fs_root)) { 3205 err = PTR_ERR(fs_info->fs_root); 3206 btrfs_warn(fs_info, "failed to read fs tree: %d", err); 3207 goto fail_qgroup; 3208 } 3209 3210 if (sb_rdonly(sb)) 3211 return 0; 3212 3213 if (btrfs_test_opt(fs_info, CLEAR_CACHE) && 3214 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { 3215 clear_free_space_tree = 1; 3216 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && 3217 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) { 3218 btrfs_warn(fs_info, "free space tree is invalid"); 3219 clear_free_space_tree = 1; 3220 } 3221 3222 if (clear_free_space_tree) { 3223 btrfs_info(fs_info, "clearing free space tree"); 3224 ret = btrfs_clear_free_space_tree(fs_info); 3225 if (ret) { 3226 btrfs_warn(fs_info, 3227 "failed to clear free space tree: %d", ret); 3228 close_ctree(fs_info); 3229 return ret; 3230 } 3231 } 3232 3233 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) && 3234 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { 3235 btrfs_info(fs_info, "creating free space tree"); 3236 ret = btrfs_create_free_space_tree(fs_info); 3237 if (ret) { 3238 btrfs_warn(fs_info, 3239 "failed to create free space tree: %d", ret); 3240 close_ctree(fs_info); 3241 return ret; 3242 } 3243 } 3244 3245 down_read(&fs_info->cleanup_work_sem); 3246 if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || 3247 (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { 3248 up_read(&fs_info->cleanup_work_sem); 3249 close_ctree(fs_info); 3250 return ret; 3251 } 3252 up_read(&fs_info->cleanup_work_sem); 3253 3254 ret = btrfs_resume_balance_async(fs_info); 3255 if (ret) { 3256 btrfs_warn(fs_info, "failed to resume balance: %d", ret); 3257 close_ctree(fs_info); 3258 return ret; 3259 } 3260 3261 ret = btrfs_resume_dev_replace_async(fs_info); 3262 if (ret) { 3263 btrfs_warn(fs_info, "failed to resume device replace: %d", ret); 3264 close_ctree(fs_info); 3265 return ret; 3266 } 3267 3268 btrfs_qgroup_rescan_resume(fs_info); 3269 3270 if (!fs_info->uuid_root) { 3271 btrfs_info(fs_info, "creating UUID tree"); 3272 ret = btrfs_create_uuid_tree(fs_info); 3273 if (ret) { 3274 btrfs_warn(fs_info, 3275 "failed to create the UUID tree: %d", ret); 3276 close_ctree(fs_info); 3277 return 
ret; 3278 } 3279 } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) || 3280 fs_info->generation != 3281 btrfs_super_uuid_tree_generation(disk_super)) { 3282 btrfs_info(fs_info, "checking UUID tree"); 3283 ret = btrfs_check_uuid_tree(fs_info); 3284 if (ret) { 3285 btrfs_warn(fs_info, 3286 "failed to check the UUID tree: %d", ret); 3287 close_ctree(fs_info); 3288 return ret; 3289 } 3290 } else { 3291 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 3292 } 3293 set_bit(BTRFS_FS_OPEN, &fs_info->flags); 3294 3295 /* 3296 * backuproot only affect mount behavior, and if open_ctree succeeded, 3297 * no need to keep the flag 3298 */ 3299 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT); 3300 3301 return 0; 3302 3303 fail_qgroup: 3304 btrfs_free_qgroup_config(fs_info); 3305 fail_trans_kthread: 3306 kthread_stop(fs_info->transaction_kthread); 3307 btrfs_cleanup_transaction(fs_info); 3308 btrfs_free_fs_roots(fs_info); 3309 fail_cleaner: 3310 kthread_stop(fs_info->cleaner_kthread); 3311 3312 /* 3313 * make sure we're done with the btree inode before we stop our 3314 * kthreads 3315 */ 3316 filemap_write_and_wait(fs_info->btree_inode->i_mapping); 3317 3318 fail_sysfs: 3319 btrfs_sysfs_remove_mounted(fs_info); 3320 3321 fail_fsdev_sysfs: 3322 btrfs_sysfs_remove_fsid(fs_info->fs_devices); 3323 3324 fail_block_groups: 3325 btrfs_put_block_group_cache(fs_info); 3326 3327 fail_tree_roots: 3328 free_root_pointers(fs_info, 1); 3329 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 3330 3331 fail_sb_buffer: 3332 btrfs_stop_all_workers(fs_info); 3333 btrfs_free_block_groups(fs_info); 3334 fail_alloc: 3335 fail_iput: 3336 btrfs_mapping_tree_free(&fs_info->mapping_tree); 3337 3338 iput(fs_info->btree_inode); 3339 fail_bio_counter: 3340 percpu_counter_destroy(&fs_info->dev_replace.bio_counter); 3341 fail_delalloc_bytes: 3342 percpu_counter_destroy(&fs_info->delalloc_bytes); 3343 fail_dirty_metadata_bytes: 3344 percpu_counter_destroy(&fs_info->dirty_metadata_bytes); 3345 fail_dio_bytes: 3346 percpu_counter_destroy(&fs_info->dio_bytes); 3347 fail_srcu: 3348 cleanup_srcu_struct(&fs_info->subvol_srcu); 3349 fail: 3350 btrfs_free_stripe_hash_table(fs_info); 3351 btrfs_close_devices(fs_info->fs_devices); 3352 return err; 3353 3354 recovery_tree_root: 3355 if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) 3356 goto fail_tree_roots; 3357 3358 free_root_pointers(fs_info, 0); 3359 3360 /* don't use the log in recovery mode, it won't be valid */ 3361 btrfs_set_super_log_root(disk_super, 0); 3362 3363 /* we can't trust the free space cache either */ 3364 btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); 3365 3366 ret = next_root_backup(fs_info, fs_info->super_copy, 3367 &num_backups_tried, &backup_index); 3368 if (ret == -1) 3369 goto fail_block_groups; 3370 goto retry_root_backup; 3371 } 3372 ALLOW_ERROR_INJECTION(open_ctree, ERRNO); 3373 3374 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) 3375 { 3376 if (uptodate) { 3377 set_buffer_uptodate(bh); 3378 } else { 3379 struct btrfs_device *device = (struct btrfs_device *) 3380 bh->b_private; 3381 3382 btrfs_warn_rl_in_rcu(device->fs_info, 3383 "lost page write due to IO error on %s", 3384 rcu_str_deref(device->name)); 3385 /* note, we don't set_buffer_write_io_error because we have 3386 * our own ways of dealing with the IO errors 3387 */ 3388 clear_buffer_uptodate(bh); 3389 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS); 3390 } 3391 unlock_buffer(bh); 3392 put_bh(bh); 3393 } 3394 3395 int 
btrfs_read_dev_one_super(struct block_device *bdev, int copy_num, 3396 struct buffer_head **bh_ret) 3397 { 3398 struct buffer_head *bh; 3399 struct btrfs_super_block *super; 3400 u64 bytenr; 3401 3402 bytenr = btrfs_sb_offset(copy_num); 3403 if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode)) 3404 return -EINVAL; 3405 3406 bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE); 3407 /* 3408 * If we fail to read from the underlying devices, as of now 3409 * the best option we have is to mark it EIO. 3410 */ 3411 if (!bh) 3412 return -EIO; 3413 3414 super = (struct btrfs_super_block *)bh->b_data; 3415 if (btrfs_super_bytenr(super) != bytenr || 3416 btrfs_super_magic(super) != BTRFS_MAGIC) { 3417 brelse(bh); 3418 return -EINVAL; 3419 } 3420 3421 *bh_ret = bh; 3422 return 0; 3423 } 3424 3425 3426 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) 3427 { 3428 struct buffer_head *bh; 3429 struct buffer_head *latest = NULL; 3430 struct btrfs_super_block *super; 3431 int i; 3432 u64 transid = 0; 3433 int ret = -EINVAL; 3434 3435 /* we would like to check all the supers, but that would make 3436 * a btrfs mount succeed after a mkfs from a different FS. 3437 * So, we need to add a special mount option to scan for 3438 * later supers, using BTRFS_SUPER_MIRROR_MAX instead 3439 */ 3440 for (i = 0; i < 1; i++) { 3441 ret = btrfs_read_dev_one_super(bdev, i, &bh); 3442 if (ret) 3443 continue; 3444 3445 super = (struct btrfs_super_block *)bh->b_data; 3446 3447 if (!latest || btrfs_super_generation(super) > transid) { 3448 brelse(latest); 3449 latest = bh; 3450 transid = btrfs_super_generation(super); 3451 } else { 3452 brelse(bh); 3453 } 3454 } 3455 3456 if (!latest) 3457 return ERR_PTR(ret); 3458 3459 return latest; 3460 } 3461 3462 /* 3463 * Write superblock @sb to the @device. Do not wait for completion, all the 3464 * buffer heads we write are pinned. 3465 * 3466 * Write @max_mirrors copies of the superblock, where 0 means default that fit 3467 * the expected device size at commit time. Note that max_mirrors must be 3468 * same for write and wait phases. 3469 * 3470 * Return number of errors when buffer head is not found or submission fails. 3471 */ 3472 static int write_dev_supers(struct btrfs_device *device, 3473 struct btrfs_super_block *sb, int max_mirrors) 3474 { 3475 struct buffer_head *bh; 3476 int i; 3477 int ret; 3478 int errors = 0; 3479 u32 crc; 3480 u64 bytenr; 3481 int op_flags; 3482 3483 if (max_mirrors == 0) 3484 max_mirrors = BTRFS_SUPER_MIRROR_MAX; 3485 3486 for (i = 0; i < max_mirrors; i++) { 3487 bytenr = btrfs_sb_offset(i); 3488 if (bytenr + BTRFS_SUPER_INFO_SIZE >= 3489 device->commit_total_bytes) 3490 break; 3491 3492 btrfs_set_super_bytenr(sb, bytenr); 3493 3494 crc = ~(u32)0; 3495 crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc, 3496 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); 3497 btrfs_csum_final(crc, sb->csum); 3498 3499 /* One reference for us, and we leave it for the caller */ 3500 bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, 3501 BTRFS_SUPER_INFO_SIZE); 3502 if (!bh) { 3503 btrfs_err(device->fs_info, 3504 "couldn't get super buffer head for bytenr %llu", 3505 bytenr); 3506 errors++; 3507 continue; 3508 } 3509 3510 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE); 3511 3512 /* one reference for submit_bh */ 3513 get_bh(bh); 3514 3515 set_buffer_uptodate(bh); 3516 lock_buffer(bh); 3517 bh->b_end_io = btrfs_end_buffer_write_sync; 3518 bh->b_private = device; 3519 3520 /* 3521 * we fua the first super. 
The others we allow
		 * to go down lazily.
		 */
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
			op_flags |= REQ_FUA;
		ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
		if (ret)
			errors++;
	}
	return errors < i ? 0 : -1;
}

/*
 * Wait for write completion of superblocks done by write_dev_supers;
 * @max_mirrors must be the same for the write and wait phases.
 *
 * Return number of errors when buffer head is not found or not marked up to
 * date.
 */
static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int errors = 0;
	bool primary_failed = false;
	u64 bytenr;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    device->commit_total_bytes)
			break;

		bh = __find_get_block(device->bdev,
				      bytenr / BTRFS_BDEV_BLOCKSIZE,
				      BTRFS_SUPER_INFO_SIZE);
		if (!bh) {
			errors++;
			if (i == 0)
				primary_failed = true;
			continue;
		}
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			errors++;
			if (i == 0)
				primary_failed = true;
		}

		/* drop our reference */
		brelse(bh);

		/* drop the reference from the writing run */
		brelse(bh);
	}

	/* log error, force error return */
	if (primary_failed) {
		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
			  device->devid);
		return -1;
	}

	return errors < i ? 0 : -1;
}

/*
 * Endio for write_dev_flush; this will wake anyone waiting for the
 * barrier when it is done.
 */
static void btrfs_end_empty_barrier(struct bio *bio)
{
	complete(bio->bi_private);
}

/*
 * Submit a flush request to the device if it supports it. Error handling is
 * done in the waiting counterpart.
 */
static void write_dev_flush(struct btrfs_device *device)
{
	struct request_queue *q = bdev_get_queue(device->bdev);
	struct bio *bio = device->flush_bio;

	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return;

	bio_reset(bio);
	bio->bi_end_io = btrfs_end_empty_barrier;
	bio_set_dev(bio, device->bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;

	btrfsic_submit_bio(bio);
	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
}

/*
 * If the flush bio has been submitted by write_dev_flush, wait for it.
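 * The BTRFS_DEV_STATE_FLUSH_SENT bit is cleared before waiting so the
 * next transaction commit can submit a fresh flush; any I/O error is
 * reported through the bio status and handled by the caller (see
 * barrier_all_devices()).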
3625 */ 3626 static blk_status_t wait_dev_flush(struct btrfs_device *device) 3627 { 3628 struct bio *bio = device->flush_bio; 3629 3630 if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)) 3631 return BLK_STS_OK; 3632 3633 clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state); 3634 wait_for_completion_io(&device->flush_wait); 3635 3636 return bio->bi_status; 3637 } 3638 3639 static int check_barrier_error(struct btrfs_fs_info *fs_info) 3640 { 3641 if (!btrfs_check_rw_degradable(fs_info, NULL)) 3642 return -EIO; 3643 return 0; 3644 } 3645 3646 /* 3647 * send an empty flush down to each device in parallel, 3648 * then wait for them 3649 */ 3650 static int barrier_all_devices(struct btrfs_fs_info *info) 3651 { 3652 struct list_head *head; 3653 struct btrfs_device *dev; 3654 int errors_wait = 0; 3655 blk_status_t ret; 3656 3657 lockdep_assert_held(&info->fs_devices->device_list_mutex); 3658 /* send down all the barriers */ 3659 head = &info->fs_devices->devices; 3660 list_for_each_entry(dev, head, dev_list) { 3661 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) 3662 continue; 3663 if (!dev->bdev) 3664 continue; 3665 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || 3666 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) 3667 continue; 3668 3669 write_dev_flush(dev); 3670 dev->last_flush_error = BLK_STS_OK; 3671 } 3672 3673 /* wait for all the barriers */ 3674 list_for_each_entry(dev, head, dev_list) { 3675 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) 3676 continue; 3677 if (!dev->bdev) { 3678 errors_wait++; 3679 continue; 3680 } 3681 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || 3682 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) 3683 continue; 3684 3685 ret = wait_dev_flush(dev); 3686 if (ret) { 3687 dev->last_flush_error = ret; 3688 btrfs_dev_stat_inc_and_print(dev, 3689 BTRFS_DEV_STAT_FLUSH_ERRS); 3690 errors_wait++; 3691 } 3692 } 3693 3694 if (errors_wait) { 3695 /* 3696 * At some point we need the status of all disks 3697 * to arrive at the volume status. So error checking 3698 * is being pushed to a separate loop. 3699 */ 3700 return check_barrier_error(info); 3701 } 3702 return 0; 3703 } 3704 3705 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags) 3706 { 3707 int raid_type; 3708 int min_tolerated = INT_MAX; 3709 3710 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 || 3711 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE)) 3712 min_tolerated = min(min_tolerated, 3713 btrfs_raid_array[BTRFS_RAID_SINGLE]. 3714 tolerated_failures); 3715 3716 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { 3717 if (raid_type == BTRFS_RAID_SINGLE) 3718 continue; 3719 if (!(flags & btrfs_raid_array[raid_type].bg_flag)) 3720 continue; 3721 min_tolerated = min(min_tolerated, 3722 btrfs_raid_array[raid_type]. 

int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
{
	int raid_type;
	int min_tolerated = INT_MAX;

	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
		min_tolerated = min(min_tolerated,
				    btrfs_raid_array[BTRFS_RAID_SINGLE].
				    tolerated_failures);

	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (raid_type == BTRFS_RAID_SINGLE)
			continue;
		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
			continue;
		min_tolerated = min(min_tolerated,
				    btrfs_raid_array[raid_type].
				    tolerated_failures);
	}

	if (min_tolerated == INT_MAX) {
		pr_warn("BTRFS: unknown raid flag: %llu", flags);
		min_tolerated = 0;
	}

	return min_tolerated;
}

int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);

	/*
	 * max_mirrors == 0 indicates we're called from commit_transaction,
	 * not from fsync, where the tree roots in fs_info have not yet been
	 * made consistent on disk.
	 */
	if (max_mirrors == 0)
		backup_super_roots(fs_info);

	sb = fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	head = &fs_info->fs_devices->devices;
	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;

	if (do_barriers) {
		ret = barrier_all_devices(fs_info);
		if (ret) {
			mutex_unlock(
				&fs_info->fs_devices->device_list_mutex);
			btrfs_handle_fs_error(fs_info, ret,
					      "errors while submitting device barriers.");
			return ret;
		}
	}

	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item,
						   dev->commit_total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item,
						  dev->commit_bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
		       BTRFS_FSID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = btrfs_validate_write_super(fs_info, sb);
		if (ret < 0) {
			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
			btrfs_handle_fs_error(fs_info, -EUCLEAN,
					      "unexpected superblock corruption detected");
			return -EUCLEAN;
		}

		ret = write_dev_supers(dev, sb, max_mirrors);
		if (ret)
			total_errors++;
	}
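
	/*
	 * max_errors was computed above as the device count recorded in the
	 * super block minus one, so the checks below only fail the commit
	 * when the super block write (or, later, the wait) failed on every
	 * device: one surviving copy of the super block is enough to carry
	 * on.
	 */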
	if (total_errors > max_errors) {
		btrfs_err(fs_info, "%d errors while writing supers",
			  total_errors);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);

		/* FUA is masked off if unsupported and can't be the reason */
		btrfs_handle_fs_error(fs_info, -EIO,
				      "%d errors while writing supers",
				      total_errors);
		return -EIO;
	}

	total_errors = 0;
	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		ret = wait_dev_supers(dev, max_mirrors);
		if (ret)
			total_errors++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	if (total_errors > max_errors) {
		btrfs_handle_fs_error(fs_info, -EIO,
				      "%d errors while writing supers",
				      total_errors);
		return -EIO;
	}
	return 0;
}

/* Drop a fs root from the radix tree and free it. */
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
				 struct btrfs_root *root)
{
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);

	if (btrfs_root_refs(&root->root_item) == 0)
		synchronize_srcu(&fs_info->subvol_srcu);

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		btrfs_free_log(NULL, root);
		if (root->reloc_root) {
			free_extent_buffer(root->reloc_root->node);
			free_extent_buffer(root->reloc_root->commit_root);
			btrfs_put_fs_root(root->reloc_root);
			root->reloc_root = NULL;
		}
	}

	if (root->free_ino_pinned)
		__btrfs_remove_free_space_cache(root->free_ino_pinned);
	if (root->free_ino_ctl)
		__btrfs_remove_free_space_cache(root->free_ino_ctl);
	btrfs_free_fs_root(root);
}

void btrfs_free_fs_root(struct btrfs_root *root)
{
	iput(root->ino_cache_inode);
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
	if (root->anon_dev)
		free_anon_bdev(root->anon_dev);
	if (root->subv_writers)
		btrfs_free_subvolume_writers(root->subv_writers);
	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	btrfs_put_fs_root(root);
}

int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i = 0;
	int err = 0;
	unsigned int ret = 0;
	int index;

	while (1) {
		index = srcu_read_lock(&fs_info->subvol_srcu);
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			break;
		}
		root_objectid = gang[ret - 1]->root_key.objectid + 1;

		for (i = 0; i < ret; i++) {
			/* Avoid grabbing roots in dead_roots */
			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
				gang[i] = NULL;
				continue;
			}
			/* grab all the search results for later use */
			gang[i] = btrfs_grab_fs_root(gang[i]);
		}
		srcu_read_unlock(&fs_info->subvol_srcu, index);

		for (i = 0; i < ret; i++) {
			if (!gang[i])
				continue;
			root_objectid = gang[i]->root_key.objectid;
			err = btrfs_orphan_cleanup(gang[i]);
			if (err)
				break;
			btrfs_put_fs_root(gang[i]);
		}
		root_objectid++;
	}

	/* release the uncleaned roots due to error */
	for (; i < ret; i++) {
		if (gang[i])
			btrfs_put_fs_root(gang[i]);
	}
	return err;
}

int btrfs_commit_super(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;

	mutex_lock(&fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);
	wake_up_process(fs_info->cleaner_kthread);

	/* wait until ongoing cleanup work is done */
	down_write(&fs_info->cleanup_work_sem);
	up_write(&fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans);
}
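
/*
 * Unmount path. The rough shutdown order in close_ctree() below: park the
 * cleaner, quiesce background work (qgroup rescan, uuid scan, balance,
 * dev-replace, scrub, defrag), do a final commit if the fs is writable,
 * stop the kthreads, then tear down caches, roots and devices.
 */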

void close_ctree(struct btrfs_fs_info *fs_info)
{
	int ret;

	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
	/*
	 * We don't want the cleaner to start new transactions, add more delayed
	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
	 * because that frees the task_struct, and the transaction kthread might
	 * still try to wake up the cleaner.
	 */
	kthread_park(fs_info->cleaner_kthread);

	/* wait for the qgroup rescan worker to stop */
	btrfs_qgroup_wait_for_completion(fs_info, false);

	/* wait for the uuid_scan task to finish */
	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complaints from lockdep et al., set sem back to initial state */
	up(&fs_info->uuid_tree_rescan_sem);

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	cancel_work_sync(&fs_info->async_reclaim_work);

	if (!sb_rdonly(fs_info->sb)) {
		/*
		 * The cleaner kthread is stopped, so do one final pass over
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);

		ret = btrfs_commit_super(fs_info);
		if (ret)
			btrfs_err(fs_info, "commit super ret %d", ret);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
	    test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
		btrfs_error_commit_super(fs_info);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	ASSERT(list_empty(&fs_info->delayed_iputs));
	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);

	btrfs_free_qgroup_config(fs_info);
	ASSERT(list_empty(&fs_info->delalloc_roots));

	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
		btrfs_info(fs_info, "at unmount delalloc count %lld",
			   percpu_counter_sum(&fs_info->delalloc_bytes));
	}

	if (percpu_counter_sum(&fs_info->dio_bytes))
		btrfs_info(fs_info, "at unmount dio bytes count %lld",
			   percpu_counter_sum(&fs_info->dio_bytes));

	btrfs_sysfs_remove_mounted(fs_info);
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

	btrfs_free_fs_roots(fs_info);

	btrfs_put_block_group_cache(fs_info);

	/*
	 * We must make sure there are no read requests submitted after we
	 * stop all the workers.
	 */
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
	btrfs_stop_all_workers(fs_info);

	btrfs_free_block_groups(fs_info);

	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
	free_root_pointers(fs_info, 1);

	iput(fs_info->btree_inode);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
		btrfsic_unmount(fs_info->fs_devices);
#endif

	btrfs_mapping_tree_free(&fs_info->mapping_tree);
	btrfs_close_devices(fs_info->fs_devices);

	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	percpu_counter_destroy(&fs_info->dio_bytes);
	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
	cleanup_srcu_struct(&fs_info->subvol_srcu);

	btrfs_free_stripe_hash_table(fs_info);
	btrfs_free_ref_cache(fs_info);
}
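
/*
 * Return convention of the helper below: 0 if the buffer is not up to date,
 * 1 if it is, and -EAGAIN is passed through from verify_parent_transid()
 * when the caller asked for an atomic check that could not be completed
 * without blocking.
 */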

int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
	struct inode *btree_inode = buf->pages[0]->mapping->host;

	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/*
	 * This is a fast path so only do this check if we have sanity tests
	 * enabled. Normal people shouldn't be using unmapped buffers as dirty
	 * outside of the sanity tests.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
		return;
#endif
	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	fs_info = root->fs_info;
	btrfs_assert_tree_locked(buf);
	if (transid != fs_info->generation)
		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
		     buf->start, transid, fs_info->generation);
	was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty)
		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
					 buf->len,
					 fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	/*
	 * btrfs_mark_buffer_dirty() can be called with the item pointers set
	 * up but the item data not yet updated, so only check the item
	 * pointers here, not the item data.
	 */
	if (btrfs_header_level(buf) == 0 &&
	    btrfs_check_leaf_relaxed(buf)) {
		btrfs_print_leaf(buf);
		ASSERT(0);
	}
#endif
}
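
/*
 * The helper below throttles metadata writers: once the
 * dirty_metadata_bytes percpu counter crosses BTRFS_DIRTY_METADATA_THRESH
 * (compared with a batch slack via __percpu_counter_compare), the caller
 * is diverted into balance_dirty_pages_ratelimited() on the btree inode's
 * mapping.
 */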

static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
					int flush_delayed)
{
	/*
	 * It looks as though older kernels could get into trouble with this
	 * code and end up stuck in balance_dirty_pages() forever.
	 */
	int ret;

	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(fs_info);

	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
				       BTRFS_DIRTY_METADATA_THRESH,
				       fs_info->dirty_metadata_batch);
	if (ret > 0) {
		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
	}
}

void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 0);
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
		      struct btrfs_key *first_key)
{
	return btree_read_extent_buffer_pages(buf, parent_transid,
					      level, first_key);
}

static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
{
	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(fs_info);

	mutex_lock(&fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);

	down_write(&fs_info->cleanup_work_sem);
	up_write(&fs_info->cleanup_work_sem);
}

static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&root->ordered_extent_lock);
	/*
	 * This will just short circuit the ordered completion stuff which will
	 * make sure the ordered extent gets properly cleaned up.
	 */
	list_for_each_entry(ordered, &root->ordered_extents,
			    root_extent_list)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
	spin_unlock(&root->ordered_extent_lock);
}

static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);

		spin_unlock(&fs_info->ordered_root_lock);
		btrfs_destroy_ordered_extents(root);

		cond_resched();
		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);

	/*
	 * We need this here because if we've been flipped read-only we won't
	 * get sync() from the umount, so we need to make sure any ordered
	 * extents that haven't had their dirty pages IO start writeout yet
	 * actually get run and error out properly.
	 */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}
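
/*
 * The splice-onto-a-local-list walk above (and in the delalloc teardown
 * further below) is the common pattern in this teardown code: detach the
 * whole list under the lock, then drop the lock while each entry is
 * processed so the per-entry work can block and reschedule.
 */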

static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (atomic_read(&delayed_refs->num_entries) == 0) {
		spin_unlock(&delayed_refs->lock);
		btrfs_info(fs_info, "delayed_refs has NO entry");
		return ret;
	}

	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;
		struct rb_node *n;
		bool pin_bytes = false;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		if (btrfs_delayed_ref_lock(delayed_refs, head))
			continue;

		spin_lock(&head->lock);
		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
			ref = rb_entry(n, struct btrfs_delayed_ref_node,
				       ref_node);
			ref->in_tree = 0;
			rb_erase_cached(&ref->ref_node, &head->ref_tree);
			RB_CLEAR_NODE(&ref->ref_node);
			if (!list_empty(&ref->add_list))
				list_del(&ref->add_list);
			atomic_dec(&delayed_refs->num_entries);
			btrfs_put_delayed_ref(ref);
		}
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		btrfs_delete_ref_head(delayed_refs, head);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);

		if (pin_bytes)
			btrfs_pin_extent(fs_info, head->bytenr,
					 head->num_bytes, 1);
		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
		btrfs_put_delayed_ref_head(head);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}

	spin_unlock(&delayed_refs->lock);

	return ret;
}
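
/*
 * In the teardown above, heads with must_insert_reserved set still own
 * their reserved extent space; it is re-pinned via btrfs_pin_extent() so
 * the later pinned-extent cleanup can release that space instead of
 * leaking it.
 */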

static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		struct inode *inode = NULL;
		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);
		__btrfs_del_delalloc_inode(root, btrfs_inode);
		spin_unlock(&root->delalloc_lock);

		/*
		 * Make sure we get a live inode and that it'll not disappear
		 * meanwhile.
		 */
		inode = igrab(&btrfs_inode->vfs_inode);
		if (inode) {
			invalidate_inode_pages2(inode->i_mapping);
			iput(inode);
		}
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);
}

static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}

static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark, NULL);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark);
		while (start <= end) {
			eb = find_extent_buffer(fs_info, start);
			start += fs_info->nodesize;
			if (!eb)
				continue;
			wait_on_extent_buffer_writeback(eb);

			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
					       &eb->bflags))
				clear_extent_buffer_dirty(eb);
			free_extent_buffer_stale(eb);
		}
	}

	return ret;
}
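
/*
 * fs_info->freed_extents[] holds two trees that normally swap roles
 * between "pinned in the running transaction" and "to be unpinned at
 * commit". On error teardown we cannot tell which one is current, so the
 * helper below walks one tree, then flips to the other via the 'loop'
 * flag and walks it too.
 */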

static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents)
{
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;
	bool loop = true;

	unpin = pinned_extents;
again:
	while (1) {
		struct extent_state *cached_state = NULL;

		/*
		 * The btrfs_finish_extent_commit() may get the same range as
		 * ours between find_first_extent_bit and clear_extent_dirty.
		 * Hence, hold the unused_bg_unpin_mutex to avoid double
		 * unpinning the same extent range.
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, &cached_state);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}

		clear_extent_dirty(unpin, start, end, &cached_state);
		free_extent_state(cached_state);
		btrfs_error_unpin_extent_range(fs_info, start, end);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
	}

	if (loop) {
		if (unpin == &fs_info->freed_extents[0])
			unpin = &fs_info->freed_extents[1];
		else
			unpin = &fs_info->freed_extents[0];
		loop = false;
		goto again;
	}

	return 0;
}

static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
{
	struct inode *inode;

	inode = cache->io_ctl.inode;
	if (inode) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		cache->io_ctl.inode = NULL;
		iput(inode);
	}
	btrfs_put_block_group(cache);
}

void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *cache;

	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group_cache,
					 dirty_list);

		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_cleanup_bg_io(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		list_del_init(&cache->dirty_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);

		spin_unlock(&cur_trans->dirty_bgs_lock);
		btrfs_put_block_group(cache);
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of io_bgs member for details why it's safe
	 * to use it without any locking
	 */
	while (!list_empty(&cur_trans->io_bgs)) {
		cache = list_first_entry(&cur_trans->io_bgs,
					 struct btrfs_block_group_cache,
					 io_list);

		list_del_init(&cache->io_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);
		btrfs_cleanup_bg_io(cache);
	}
}
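
/*
 * btrfs_cleanup_one_transaction() below mimics the state walk of a normal
 * commit (COMMIT_START -> UNBLOCKED -> COMPLETED) and issues the matching
 * wake_up()s, so any task sleeping on those transitions is released even
 * though the transaction is being torn down rather than committed.
 */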

void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev, *tmp;

	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));

	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&dev->post_commit_list);
	}

	btrfs_destroy_delayed_refs(cur_trans, fs_info);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&fs_info->transaction_wait);

	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);

	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(fs_info,
				    fs_info->pinned_extents);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
}

static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *t;

	mutex_lock(&fs_info->transaction_kthread_mutex);

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&fs_info->trans_list)) {
		t = list_first_entry(&fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_START) {
			refcount_inc(&t->use_count);
			spin_unlock(&fs_info->trans_lock);
			btrfs_wait_for_commit(fs_info, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&fs_info->trans_lock);
			continue;
		}
		if (t == fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't hold a trans
			 * handle open currently for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, fs_info);

		spin_lock(&fs_info->trans_lock);
		if (t == fs_info->running_transaction)
			fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(fs_info->tree_root);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(fs_info);
	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);
	btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
	btrfs_destroy_all_delalloc_inodes(fs_info);
	mutex_unlock(&fs_info->transaction_kthread_mutex);

	return 0;
}

static const struct extent_io_ops btree_extent_io_ops = {
	/* mandatory callbacks */
	.submit_bio_hook = btree_submit_bio_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
};