/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				   int read_only);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_root *root);
static void btrfs_error_commit_super(struct btrfs_root *root);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
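 *
 * btrfs_bio_wq_end_io() saves the bio's original bi_end_io and bi_private,
 * redirects completion to end_workqueue_bio(), and end_workqueue_fn() later
 * restores them before completing the bio in process context.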
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	enum btrfs_wq_endio_type metadata;
	struct list_head list;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int rw;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	int error;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
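 *
 * For example, a leaf (level 0) of the extent tree gets the class name
 * "btrfs-extent-00", following the "btrfs-%s-%02d" format used in
 * btrfs_init_lockdep() below.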
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
# error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

u32 btrfs_csum_data(char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	put_unaligned_le32(~crc, result);
}

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
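 *
 * The checksum covers everything past the first BTRFS_CSUM_SIZE bytes of
 * the block, since that region is where the csum itself is stored.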
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size, GFP_NOFS);
		if (!result)
			return -ENOMEM;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;

			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
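	 *
	 * (extent_buffer_under_io() checks the dirty and writeback bits as
	 * well as any pages still under IO)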
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm.  Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		const int csum_size = sizeof(crc);
		char result[csum_size];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, csum_size))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n",
				csum_type);
		ret = 1;
	}

	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start,
					       WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret) {
			if (!verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				break;
			else
				ret = -EIO;
		}

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			break;

		num_copies = btrfs_num_copies(root->fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(root, eb, failed_mirror);

	return ret;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
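	 *
	 * A bytenr mismatch usually means the block is about to be written
	 * to the wrong place; a page that is not uptodate points at an
	 * earlier read or setup failure.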
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}

static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_UUID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

#define CORRUPT(reason, eb, root, slot)				\
	btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu, "	\
		   "root=%llu, slot=%d", reason,			\
		   btrfs_header_bytenr(eb), root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
			       struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_key leaf_key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	if (nritems == 0)
		return 0;

	/* Check the 0 item */
	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
	    BTRFS_LEAF_DATA_SIZE(root)) {
		CORRUPT("invalid item offset size pair", leaf, root, 0);
		return -EIO;
	}

	/*
	 * Check to make sure each item's keys are in the correct order and
	 * their offsets make sense.  We only have to loop through nritems-1
	 * because we check the current slot against the next slot, which
	 * verifies the next slot's offset+size makes sense and that the
	 * current slot's offset is correct.
	 */
	for (slot = 0; slot < nritems - 1; slot++) {
		btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &key, slot + 1);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
			CORRUPT("bad key order", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards the
		 * front.
		 */
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			CORRUPT("slot offset bad", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent to each other, but
		 * all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			CORRUPT("slot end outside of leaf", leaf, root, slot);
			return -EIO;
		}
	}

	return 0;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
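	 * (the matching free_extent_buffer() is at the end of this function)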
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
			     found_start, eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d",
			  (int)btrfs_header_level(eb));
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && check_leaf(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(fs_info, eb, eb->start, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb->fs_info, eb, eb->start, -EIO);
	return -EIO;	/* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->error = bio->bi_error;

	if (bio->bi_rw & REQ_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
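			/* plain data reads */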
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->thread_pool_size,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	int ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->inode, async->rw, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->error = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->error) {
		async->bio->bi_error = async->error;
		bio_endio(async->bio);
		return;
	}

	async->submit_bio_done(async->inode, async->rw, async->bio,
			       async->mirror_num, async->bio_flags,
			       async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			unsigned long bio_flags,
			u64 bio_offset,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->error = 0;

	atomic_inc(&fs_info->nr_async_submits);

	if (rw & REQ_SYNC)
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);

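	/*
	 * If someone is draining async submits, hold new submitters here
	 * until the pending count drops to zero.
	 */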
	while (atomic_read(&fs_info->async_submit_draining) &&
	       atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}

static int btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return ret;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	return btree_csum_one_bio(bio);
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	int ret;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(bio);
	}
	return ret;
}

static int check_async_write(struct inode *inode, unsigned long bio_flags)
{
	if (bio_flags & EXTENT_BIO_TREE_LOG)
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	int async = check_async_write(inode, bio_flags);
	int ret;

	if (!(rw & REQ_WRITE)) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
					  bio, BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				    mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				    mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
					  inode, rw, bio, mirror_num, 0,
					  bio_offset,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_error = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
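	 * (try_to_release_page() below asks the owner to drop them; if it
	 * refuses, we cannot migrate this page)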
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif


static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage	= btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty	= btree_set_page_dirty,
};

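/*
 * Start readahead of a single tree block: fire off the read without
 * waiting for completion and drop our reference right away.
 */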
void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(root, bytenr);
	if (!buf)
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr);
	if (!buf)
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
					    u64 bytenr)
{
	return find_extent_buffer(fs_info, bytenr);
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr)
{
	if (btrfs_test_is_dummy_root(root))
		return alloc_test_extent_buffer(root->fs_info, bytenr);
	return alloc_extent_buffer(root->fs_info, bytenr);
}


int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	return filemap_fdatawait_range(buf->pages[0]->mapping,
				       buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;

}

void clean_tree_block(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			__percpu_counter_add(&fs_info->dirty_metadata_bytes,
					     -buf->len,
					     fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

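/*
 * Initialize the in-memory fields of a freshly allocated root.  fs_info
 * may be NULL here, but only when called from the testing infrastructure.
 */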
static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
			 struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	root->node = NULL;
	root->commit_root = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->stripesize = stripesize;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	atomic_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshoted, 0);
	atomic_set(&root->qgroup_meta_rsv, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (fs_info)
		extent_io_tree_init(&root->dirty_log_pages,
				    fs_info->btree_inode->i_mapping);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (fs_info)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
					   gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(void)
{
	struct btrfs_root *root;

	root = btrfs_alloc_root(NULL, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);
	__setup_root(4096, 4096, 4096, root, NULL, 1);
	set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
	root->alloc_bytenr = 0;

	return root;
}
#endif

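/*
 * Create a new tree with the given objectid: allocate an empty leaf for it
 * and insert the matching root item into the root tree.
 */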
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid;

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->sectorsize,
		     tree_root->stripesize, root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(leaf),
			    BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->sectorsize,
		     tree_root->stripesize, root, fs_info,
		     BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
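	 *
	 * Log trees are also never inserted into the root tree; they are
	 * only tracked in memory and via fs_info->log_root_tree.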
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer(root->node, root->fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, root->fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(tree_root->nodesize, tree_root->sectorsize,
		     tree_root->stripesize, root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     generation);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

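/*
 * Wrapper around btrfs_read_tree_root() that marks non-log roots
 * reference counted (REF_COWS) and sanity checks the root item.
 */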
struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto free_writers;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					&root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto free_root_dev;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;

free_root_dev:
	free_anon_bdev(root->anon_dev);
free_writers:
	btrfs_free_subvolume_writers(root->subv_writers);
fail:
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	return ret;
}

static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					       u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

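/*
 * Look up a root by key: the global roots are returned straight from
 * fs_info, subvolume roots come from the radix tree cache or are read
 * from disk and cached there.
 */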
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
	int err;

	err = bdi_setup_and_register(bdi, "btrfs");
	if (err)
		return err;

	bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
	bdi->congested_fn = btrfs_congested_fn;
	bdi->congested_data = info;
	bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
	return 0;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}

static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	int again;
	struct btrfs_trans_handle *trans;

	do {
		again = 0;

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(root))
			goto sleep;

		if (!mutex_trylock(&root->fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
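		 * Re-check under the mutex before doing any real work.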
		 */
		if (btrfs_need_cleaner_sleep(root)) {
			mutex_unlock(&root->fs_info->cleaner_mutex);
			goto sleep;
		}

		mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
		btrfs_run_delayed_iputs(root);
		mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);

		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&root->fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(root->fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex.  So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(root->fs_info);
sleep:
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());

	/*
	 * Transaction kthread is stopped before us and wakes us up.
	 * However we might have started a new transaction and COWed some
	 * tree blocks when deleting unused block groups for example.  So
	 * make sure we commit the transaction we started to have a clean
	 * shutdown when evicting the btree inode - if it has dirty pages
	 * when we do the final iput() on it, eviction will trigger a
	 * writeback for it which will fail with null pointer dereferences
	 * since work queues and other resources were already released and
	 * destroyed by the time the iput/eviction/writeback is made.
	 */
	trans = btrfs_attach_transaction(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			btrfs_err(root->fs_info,
				  "cleaner transaction attach returned %ld",
				  PTR_ERR(trans));
	} else {
		int ret;

		ret = btrfs_commit_transaction(trans, root);
		if (ret)
			btrfs_err(root->fs_info,
				  "cleaner open transaction commit returned %d",
				  ret);
	}

	return 0;
}

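/*
 * Kick off a transaction commit once the running transaction is old
 * enough (fs_info->commit_interval, 30 seconds by default).
 */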
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	unsigned long now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * root->fs_info->commit_interval;
		mutex_lock(&root->fs_info->transaction_kthread_mutex);

		spin_lock(&root->fs_info->trans_lock);
		cur = root->fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&root->fs_info->trans_lock);
			goto sleep;
		}

		now = get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    (now < cur->start_time ||
		     now - cur->start_time < root->fs_info->commit_interval)) {
			spin_unlock(&root->fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&root->fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans, root);
		} else {
			btrfs_end_transaction(trans, root);
		}
sleep:
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);

		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
				      &root->fs_info->fs_state)))
			btrfs_cleanup_transaction(root);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop() &&
		    (!btrfs_transaction_blocked(root->fs_info) ||
		     cannot_commit))
			schedule_timeout(delay);
		__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will find the highest generation in the array of
 * root backups.  The index of the newest entry is returned,
 * or -1 if we can't find anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}


/*
 * find the oldest backup so we know where to store new entries
 * in the backup array.  This will set the backup_root_index
 * field in the fs_info struct
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
				     u64 newest_gen)
{
	int newest_index = -1;

	newest_index = find_newest_super_backup(info, newest_gen);
	/* if there was garbage in there, just move along */
	if (newest_index == -1) {
		info->backup_root_index = 0;
	} else {
		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
	}
}

/*
 * copy all the root pointers into the super backup array.
/*
 * Copy all the root pointers into the super backup array. This will
 * bump the backup pointer by one when it is done.
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	int next_backup;
	struct btrfs_root_backup *root_backup;
	int last_backup;

	next_backup = info->backup_root_index;
	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
		BTRFS_NUM_BACKUP_ROOTS;

	/*
	 * just overwrite the last backup if we're at the same generation;
	 * this happens only at umount
	 */
	root_backup = info->super_for_commit->super_roots + last_backup;
	if (btrfs_backup_tree_root_gen(root_backup) ==
	    btrfs_header_generation(info->tree_root->node))
		next_backup = last_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			btrfs_header_generation(info->tree_root->node));

	btrfs_set_backup_tree_root_level(root_backup,
			btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			btrfs_header_level(info->chunk_root->node));

	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
	btrfs_set_backup_extent_root_gen(root_backup,
			btrfs_header_generation(info->extent_root->node));
	btrfs_set_backup_extent_root_level(root_backup,
			btrfs_header_level(info->extent_root->node));

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root. Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
					 info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
				btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
				btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
			btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
			btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
	btrfs_set_backup_csum_root_gen(root_backup,
			btrfs_header_generation(info->csum_root->node));
	btrfs_set_backup_csum_root_level(root_backup,
			btrfs_header_level(info->csum_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			btrfs_super_num_devices(info->super_copy));

	/*
	 * if we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}

/*
 * This copies info out of the root backup array and back into the
 * in-memory super block. It is meant to help iterate through the array,
 * so you send it the number of backups you've already tried and the
 * last backup index you used.
 *
 * This returns -1 when it has tried all the backups.
 */
static noinline int next_root_backup(struct btrfs_fs_info *info,
				     struct btrfs_super_block *super,
				     int *num_backups_tried, int *backup_index)
{
	struct btrfs_root_backup *root_backup;
	int newest = *backup_index;

	if (*num_backups_tried == 0) {
		u64 gen = btrfs_super_generation(super);

		newest = find_newest_super_backup(info, gen);
		if (newest == -1)
			return -1;

		*backup_index = newest;
		*num_backups_tried = 1;
	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
		/* we've tried all the backups, all done */
		return -1;
	} else {
		/* jump to the next oldest backup */
		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
			BTRFS_NUM_BACKUP_ROOTS;
		*backup_index = newest;
		*num_backups_tried += 1;
	}
	root_backup = super->super_roots + newest;

	btrfs_set_super_generation(super,
				   btrfs_backup_tree_root_gen(root_backup));
	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
	btrfs_set_super_root_level(super,
				   btrfs_backup_tree_root_level(root_backup));
	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

	/*
	 * FIXME: the total bytes and num_devices need to match or we should
	 * require a fsck
	 */
	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
	return 0;
}
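/*
 * Sketch of the intended iteration over the backups (a compact
 * equivalent of the goto-based retry_root_backup loop in open_ctree()
 * below); num_backups_tried and backup_index both start at 0:
 *
 *	while (next_root_backup(fs_info, fs_info->super_copy,
 *				&num_backups_tried, &backup_index) == 0) {
 *		// try to read the tree root this backup points to
 *	}
 */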
/* helper to cleanup workers */
static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{
	btrfs_destroy_workqueue(fs_info->fixup_workers);
	btrfs_destroy_workqueue(fs_info->delalloc_workers);
	btrfs_destroy_workqueue(fs_info->workers);
	btrfs_destroy_workqueue(fs_info->endio_workers);
	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
	btrfs_destroy_workqueue(fs_info->endio_repair_workers);
	btrfs_destroy_workqueue(fs_info->rmw_workers);
	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
	btrfs_destroy_workqueue(fs_info->submit_workers);
	btrfs_destroy_workqueue(fs_info->delayed_workers);
	btrfs_destroy_workqueue(fs_info->caching_workers);
	btrfs_destroy_workqueue(fs_info->readahead_workers);
	btrfs_destroy_workqueue(fs_info->flush_workers);
	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
	btrfs_destroy_workqueue(fs_info->extent_workers);
}

static void free_root_extent_buffers(struct btrfs_root *root)
{
	if (root) {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		root->node = NULL;
		root->commit_root = NULL;
	}
}

/* helper to cleanup tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
{
	free_root_extent_buffers(info->tree_root);

	free_root_extent_buffers(info->dev_root);
	free_root_extent_buffers(info->extent_root);
	free_root_extent_buffers(info->csum_root);
	free_root_extent_buffers(info->quota_root);
	free_root_extent_buffers(info->uuid_root);
	if (chunk_root)
		free_root_extent_buffers(info->chunk_root);
	free_root_extent_buffers(info->free_space_root);
}

void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (!list_empty(&fs_info->dead_roots)) {
		gang[0] = list_entry(fs_info->dead_roots.next,
				     struct btrfs_root, root_list);
		list_del(&gang[0]->root_list);

		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
		} else {
			free_extent_buffer(gang[0]->node);
			free_extent_buffer(gang[0]->commit_root);
			btrfs_put_fs_root(gang[0]);
		}
	}

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		btrfs_free_log_root_tree(NULL, fs_info);
		btrfs_destroy_pinned_extent(fs_info->tree_root,
					    fs_info->pinned_extents);
	}
}

static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
{
	mutex_init(&fs_info->scrub_lock);
	atomic_set(&fs_info->scrubs_running, 0);
	atomic_set(&fs_info->scrub_pause_req, 0);
	atomic_set(&fs_info->scrubs_paused, 0);
	atomic_set(&fs_info->scrub_cancel_req, 0);
	init_waitqueue_head(&fs_info->scrub_pause_wait);
	fs_info->scrub_workers_refcnt = 0;
}

static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->balance_lock);
	mutex_init(&fs_info->balance_mutex);
	atomic_set(&fs_info->balance_running, 0);
	atomic_set(&fs_info->balance_pause_req, 0);
	atomic_set(&fs_info->balance_cancel_req, 0);
	fs_info->balance_ctl = NULL;
	init_waitqueue_head(&fs_info->balance_wait_q);
}

static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info,
				   struct btrfs_root *tree_root)
{
	fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
	set_nlink(fs_info->btree_inode, 1);
	/*
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	 */
	fs_info->btree_inode->i_size = OFFSET_MAX;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;

	RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			    fs_info->btree_inode->i_mapping);
	BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);

	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	set_bit(BTRFS_INODE_DUMMY,
		&BTRFS_I(fs_info->btree_inode)->runtime_flags);
	btrfs_insert_inode_hash(fs_info->btree_inode);
}

static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
	fs_info->dev_replace.lock_owner = 0;
	atomic_set(&fs_info->dev_replace.nesting_level, 0);
	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
	rwlock_init(&fs_info->dev_replace.lock);
	atomic_set(&fs_info->dev_replace.read_locks, 0);
	atomic_set(&fs_info->dev_replace.blocking_readers, 0);
	init_waitqueue_head(&fs_info->replace_wait);
	init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
}

static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->qgroup_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	fs_info->qgroup_tree = RB_ROOT;
	fs_info->qgroup_op_tree = RB_ROOT;
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	fs_info->qgroup_seq = 1;
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	fs_info->qgroup_ulist = NULL;
	mutex_init(&fs_info->qgroup_rescan_lock);
}

static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
				 struct btrfs_fs_devices *fs_devices)
{
	int max_active = fs_info->thread_pool_size;
	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;

	fs_info->workers =
		btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
				      max_active, 16);

	fs_info->delalloc_workers =
		btrfs_alloc_workqueue("delalloc", flags, max_active, 2);

	fs_info->flush_workers =
		btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);

	fs_info->caching_workers =
		btrfs_alloc_workqueue("cache", flags, max_active, 0);

	/*
	 * a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be sent down in a sane order to the
	 * devices
	 */
	fs_info->submit_workers =
		btrfs_alloc_workqueue("submit", flags,
				      min_t(u64, fs_devices->num_devices,
					    max_active), 64);

	fs_info->fixup_workers =
		btrfs_alloc_workqueue("fixup", flags, 1, 0);

	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers =
		btrfs_alloc_workqueue("endio", flags, max_active, 4);
	fs_info->endio_meta_workers =
btrfs_alloc_workqueue("endio-meta", flags, max_active, 4); 2342 fs_info->endio_meta_write_workers = 2343 btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2); 2344 fs_info->endio_raid56_workers = 2345 btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4); 2346 fs_info->endio_repair_workers = 2347 btrfs_alloc_workqueue("endio-repair", flags, 1, 0); 2348 fs_info->rmw_workers = 2349 btrfs_alloc_workqueue("rmw", flags, max_active, 2); 2350 fs_info->endio_write_workers = 2351 btrfs_alloc_workqueue("endio-write", flags, max_active, 2); 2352 fs_info->endio_freespace_worker = 2353 btrfs_alloc_workqueue("freespace-write", flags, max_active, 0); 2354 fs_info->delayed_workers = 2355 btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0); 2356 fs_info->readahead_workers = 2357 btrfs_alloc_workqueue("readahead", flags, max_active, 2); 2358 fs_info->qgroup_rescan_workers = 2359 btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0); 2360 fs_info->extent_workers = 2361 btrfs_alloc_workqueue("extent-refs", flags, 2362 min_t(u64, fs_devices->num_devices, 2363 max_active), 8); 2364 2365 if (!(fs_info->workers && fs_info->delalloc_workers && 2366 fs_info->submit_workers && fs_info->flush_workers && 2367 fs_info->endio_workers && fs_info->endio_meta_workers && 2368 fs_info->endio_meta_write_workers && 2369 fs_info->endio_repair_workers && 2370 fs_info->endio_write_workers && fs_info->endio_raid56_workers && 2371 fs_info->endio_freespace_worker && fs_info->rmw_workers && 2372 fs_info->caching_workers && fs_info->readahead_workers && 2373 fs_info->fixup_workers && fs_info->delayed_workers && 2374 fs_info->extent_workers && 2375 fs_info->qgroup_rescan_workers)) { 2376 return -ENOMEM; 2377 } 2378 2379 return 0; 2380 } 2381 2382 static int btrfs_replay_log(struct btrfs_fs_info *fs_info, 2383 struct btrfs_fs_devices *fs_devices) 2384 { 2385 int ret; 2386 struct btrfs_root *tree_root = fs_info->tree_root; 2387 struct btrfs_root *log_tree_root; 2388 struct btrfs_super_block *disk_super = fs_info->super_copy; 2389 u64 bytenr = btrfs_super_log_root(disk_super); 2390 2391 if (fs_devices->rw_devices == 0) { 2392 btrfs_warn(fs_info, "log replay required on RO media"); 2393 return -EIO; 2394 } 2395 2396 log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); 2397 if (!log_tree_root) 2398 return -ENOMEM; 2399 2400 __setup_root(tree_root->nodesize, tree_root->sectorsize, 2401 tree_root->stripesize, log_tree_root, fs_info, 2402 BTRFS_TREE_LOG_OBJECTID); 2403 2404 log_tree_root->node = read_tree_block(tree_root, bytenr, 2405 fs_info->generation + 1); 2406 if (IS_ERR(log_tree_root->node)) { 2407 btrfs_warn(fs_info, "failed to read log tree"); 2408 ret = PTR_ERR(log_tree_root->node); 2409 kfree(log_tree_root); 2410 return ret; 2411 } else if (!extent_buffer_uptodate(log_tree_root->node)) { 2412 btrfs_err(fs_info, "failed to read log tree"); 2413 free_extent_buffer(log_tree_root->node); 2414 kfree(log_tree_root); 2415 return -EIO; 2416 } 2417 /* returns with log_tree_root freed on success */ 2418 ret = btrfs_recover_log_trees(log_tree_root); 2419 if (ret) { 2420 btrfs_handle_fs_error(tree_root->fs_info, ret, 2421 "Failed to recover log tree"); 2422 free_extent_buffer(log_tree_root->node); 2423 kfree(log_tree_root); 2424 return ret; 2425 } 2426 2427 if (fs_info->sb->s_flags & MS_RDONLY) { 2428 ret = btrfs_commit_super(tree_root); 2429 if (ret) 2430 return ret; 2431 } 2432 2433 return 0; 2434 } 2435 2436 static int btrfs_read_roots(struct btrfs_fs_info *fs_info, 2437 struct btrfs_root *tree_root) 2438 { 2439 
static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
			    struct btrfs_fs_devices *fs_devices)
{
	int ret;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *log_tree_root;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	u64 bytenr = btrfs_super_log_root(disk_super);

	if (fs_devices->rw_devices == 0) {
		btrfs_warn(fs_info, "log replay required on RO media");
		return -EIO;
	}

	log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!log_tree_root)
		return -ENOMEM;

	__setup_root(tree_root->nodesize, tree_root->sectorsize,
		     tree_root->stripesize, log_tree_root, fs_info,
		     BTRFS_TREE_LOG_OBJECTID);

	log_tree_root->node = read_tree_block(tree_root, bytenr,
					      fs_info->generation + 1);
	if (IS_ERR(log_tree_root->node)) {
		btrfs_warn(fs_info, "failed to read log tree");
		ret = PTR_ERR(log_tree_root->node);
		kfree(log_tree_root);
		return ret;
	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
		btrfs_err(fs_info, "failed to read log tree");
		free_extent_buffer(log_tree_root->node);
		kfree(log_tree_root);
		return -EIO;
	}
	/* returns with log_tree_root freed on success */
	ret = btrfs_recover_log_trees(log_tree_root);
	if (ret) {
		btrfs_handle_fs_error(tree_root->fs_info, ret,
				      "Failed to recover log tree");
		free_extent_buffer(log_tree_root->node);
		kfree(log_tree_root);
		return ret;
	}

	if (fs_info->sb->s_flags & MS_RDONLY) {
		ret = btrfs_commit_super(tree_root);
		if (ret)
			return ret;
	}

	return 0;
}

static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
			    struct btrfs_root *tree_root)
{
	struct btrfs_root *root;
	struct btrfs_key location;
	int ret;

	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = 0;

	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root))
		return PTR_ERR(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->extent_root = root;

	location.objectid = BTRFS_DEV_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root))
		return PTR_ERR(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->dev_root = root;
	btrfs_init_devices_late(fs_info);

	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root))
		return PTR_ERR(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->csum_root = root;

	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (!IS_ERR(root)) {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->quota_enabled = 1;
		fs_info->pending_quota_state = 1;
		fs_info->quota_root = root;
	}

	location.objectid = BTRFS_UUID_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		if (ret != -ENOENT)
			return ret;
	} else {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->uuid_root = root;
	}

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
		root = btrfs_read_tree_root(tree_root, &location);
		if (IS_ERR(root))
			return PTR_ERR(root);
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->free_space_root = root;
	}

	return 0;
}
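/*
 * Note that in btrfs_read_roots() above the extent, dev and csum trees
 * are mandatory (any read failure aborts the mount), while the quota,
 * uuid and free space trees are optional: a missing quota tree is
 * silently tolerated, the uuid tree tolerates -ENOENT, and the free
 * space tree is read only when the compat_ro bit says it exists.
 */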
int open_ctree(struct super_block *sb,
	       struct btrfs_fs_devices *fs_devices,
	       char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 stripesize;
	u64 generation;
	u64 features;
	struct btrfs_key location;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	int ret;
	int err = -EINVAL;
	int num_backups_tried = 0;
	int backup_index = 0;
	int max_active;
	bool cleaner_mutex_locked = false;

	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!tree_root || !chunk_root) {
		err = -ENOMEM;
		goto fail;
	}

	ret = init_srcu_struct(&fs_info->subvol_srcu);
	if (ret) {
		err = ret;
		goto fail;
	}

	ret = setup_bdi(fs_info, &fs_info->bdi);
	if (ret) {
		err = ret;
		goto fail_srcu;
	}

	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
	if (ret) {
		err = ret;
		goto fail_bdi;
	}
	fs_info->dirty_metadata_batch = PAGE_SIZE *
					(1 + ilog2(nr_cpu_ids));

	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
	if (ret) {
		err = ret;
		goto fail_dirty_metadata_bytes;
	}

	ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
	if (ret) {
		err = ret;
		goto fail_delalloc_bytes;
	}

	fs_info->btree_inode = new_inode(sb);
	if (!fs_info->btree_inode) {
		err = -ENOMEM;
		goto fail_bio_counter;
	}

	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->delayed_iputs);
	INIT_LIST_HEAD(&fs_info->delalloc_roots);
	INIT_LIST_HEAD(&fs_info->caching_block_groups);
	spin_lock_init(&fs_info->delalloc_root_lock);
	spin_lock_init(&fs_info->trans_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->delayed_iput_lock);
	spin_lock_init(&fs_info->defrag_inodes_lock);
	spin_lock_init(&fs_info->free_chunk_lock);
	spin_lock_init(&fs_info->tree_mod_seq_lock);
	spin_lock_init(&fs_info->super_lock);
	spin_lock_init(&fs_info->qgroup_op_lock);
	spin_lock_init(&fs_info->buffer_lock);
	spin_lock_init(&fs_info->unused_bgs_lock);
	rwlock_init(&fs_info->tree_mod_log_lock);
	mutex_init(&fs_info->unused_bg_unpin_mutex);
	mutex_init(&fs_info->delete_unused_bgs_mutex);
	mutex_init(&fs_info->reloc_mutex);
	mutex_init(&fs_info->delalloc_root_mutex);
	mutex_init(&fs_info->cleaner_delayed_iput_mutex);
	seqlock_init(&fs_info->profiles_lock);

	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
	INIT_LIST_HEAD(&fs_info->unused_bgs);
	btrfs_mapping_init(&fs_info->mapping_tree);
	btrfs_init_block_rsv(&fs_info->global_block_rsv,
			     BTRFS_BLOCK_RSV_GLOBAL);
	btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
			     BTRFS_BLOCK_RSV_DELALLOC);
	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
			     BTRFS_BLOCK_RSV_DELOPS);
	atomic_set(&fs_info->nr_async_submits, 0);
	atomic_set(&fs_info->async_delalloc_pages, 0);
	atomic_set(&fs_info->async_submit_draining, 0);
	atomic_set(&fs_info->nr_async_bios, 0);
	atomic_set(&fs_info->defrag_running, 0);
	atomic_set(&fs_info->qgroup_op_seq, 0);
	atomic_set(&fs_info->reada_works_cnt, 0);
	atomic64_set(&fs_info->tree_mod_seq, 0);
	fs_info->sb = sb;
	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
	fs_info->metadata_ratio = 0;
	fs_info->defrag_inodes = RB_ROOT;
	fs_info->free_chunk_space = 0;
	fs_info->tree_mod_log = RB_ROOT;
	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
	/* readahead state */
	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	spin_lock_init(&fs_info->reada_lock);

	fs_info->thread_pool_size = min_t(unsigned long,
					  num_online_cpus() + 2, 8);

	INIT_LIST_HEAD(&fs_info->ordered_roots);
	spin_lock_init(&fs_info->ordered_root_lock);
	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
					GFP_KERNEL);
	if (!fs_info->delayed_root) {
		err = -ENOMEM;
		goto fail_iput;
	}
	btrfs_init_delayed_root(fs_info->delayed_root);

	btrfs_init_scrub(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	fs_info->check_integrity_print_mask = 0;
#endif
	btrfs_init_balance(fs_info);
	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);

	sb->s_blocksize = 4096;
	sb->s_blocksize_bits = blksize_bits(4096);
	sb->s_bdi = &fs_info->bdi;

	btrfs_init_btree_inode(fs_info, tree_root);

	spin_lock_init(&fs_info->block_group_cache_lock);
	fs_info->block_group_cache_tree = RB_ROOT;
	fs_info->first_logical_byte = (u64)-1;

	extent_io_tree_init(&fs_info->freed_extents[0],
			    fs_info->btree_inode->i_mapping);
	extent_io_tree_init(&fs_info->freed_extents[1],
			    fs_info->btree_inode->i_mapping);
	fs_info->pinned_extents = &fs_info->freed_extents[0];
	fs_info->do_barriers = 1;

	mutex_init(&fs_info->ordered_operations_mutex);
	mutex_init(&fs_info->tree_log_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->volume_mutex);
	mutex_init(&fs_info->ro_block_group_mutex);
	init_rwsem(&fs_info->commit_root_sem);
	init_rwsem(&fs_info->cleanup_work_sem);
	init_rwsem(&fs_info->subvol_sem);
	sema_init(&fs_info->uuid_tree_rescan_sem, 1);

	btrfs_init_dev_replace_locks(fs_info);
	btrfs_init_qgroup(fs_info);

	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);

	init_waitqueue_head(&fs_info->transaction_throttle);
	init_waitqueue_head(&fs_info->transaction_wait);
	init_waitqueue_head(&fs_info->transaction_blocked_wait);
	init_waitqueue_head(&fs_info->async_submit_wait);

	INIT_LIST_HEAD(&fs_info->pinned_chunks);

	ret = btrfs_alloc_stripe_hash_table(fs_info);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	__setup_root(4096, 4096, 4096, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);

	invalidate_bdev(fs_devices->latest_bdev);

	/*
	 * Read super block and check the signature bytes only
	 */
	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
	if (IS_ERR(bh)) {
		err = PTR_ERR(bh);
		goto fail_alloc;
	}

	/*
	 * We want to check superblock checksum, the type is stored inside.
	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
	 */
	if (btrfs_check_super_csum(bh->b_data)) {
		btrfs_err(fs_info, "superblock checksum mismatch");
		err = -EINVAL;
		brelse(bh);
		goto fail_alloc;
	}

	/*
	 * super_copy is zeroed at allocation time and we never touch the
	 * following bytes up to INFO_SIZE, the checksum is calculated from
	 * the whole block of INFO_SIZE
	 */
	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_for_commit));
	brelse(bh);

	memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);

	ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
	if (ret) {
		btrfs_err(fs_info, "superblock contains fatal errors");
		err = -EINVAL;
		goto fail_alloc;
	}

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_alloc;
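	/*
	 * From here on we sanity-check what the super block advertises:
	 * error state, the backup-root ring position, mount options and
	 * feature flags, all before any tree is read.
	 */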
	/* check the FS state; has the FS been flagged as broken? */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);

	/*
	 * run through our array of backup supers and set up
	 * our ring pointer to the oldest one
	 */
	generation = btrfs_super_generation(disk_super);
	find_oldest_super_backup(fs_info, generation);

	/*
	 * In the long term, we'll store the compression type in the super
	 * block, and it'll be used for per file compression control.
	 */
	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;

	ret = btrfs_parse_options(tree_root, options, sb->s_flags);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super) &
		~BTRFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		btrfs_err(fs_info,
			  "cannot mount because of unsupported optional features (%llx)",
			  features);
		err = -EINVAL;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super);
	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
	if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;

	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
		btrfs_info(fs_info, "has skinny extents");

	/*
	 * flag our filesystem as having big metadata blocks if
	 * they are bigger than the page size
	 */
	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
			btrfs_info(fs_info,
				   "flagging fs with big metadata feature");
		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
	}

	nodesize = btrfs_super_nodesize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);
	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));

	/*
	 * mixed block groups end up with duplicate but slightly offset
	 * extent buffers for the same range. It leads to corruptions
	 */
	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
	    (sectorsize != nodesize)) {
		btrfs_err(fs_info,
			  "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
			  nodesize, sectorsize);
		goto fail_alloc;
	}
	/*
	 * We needn't take the lock here because no other task can
	 * update the flag concurrently.
	 */
	btrfs_set_super_incompat_flags(disk_super, features);

	features = btrfs_super_compat_ro_flags(disk_super) &
		~BTRFS_FEATURE_COMPAT_RO_SUPP;
	if (!(sb->s_flags & MS_RDONLY) && features) {
		btrfs_err(fs_info,
			  "cannot mount read-write because of unsupported optional features (%llx)",
			  features);
		err = -EINVAL;
		goto fail_alloc;
	}

	max_active = fs_info->thread_pool_size;

	ret = btrfs_init_workqueues(fs_info, fs_devices);
	if (ret) {
		err = ret;
		goto fail_sb_buffer;
	}

	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
				    SZ_4M / PAGE_SIZE);

	tree_root->nodesize = nodesize;
	tree_root->sectorsize = sectorsize;
	tree_root->stripesize = stripesize;

	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(tree_root);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		btrfs_err(fs_info, "failed to read the system array: %d", ret);
		goto fail_sb_buffer;
	}

	generation = btrfs_super_chunk_root_generation(disk_super);

	__setup_root(nodesize, sectorsize, stripesize, chunk_root,
		     fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(chunk_root,
					   btrfs_super_chunk_root(disk_super),
					   generation);
	if (IS_ERR(chunk_root->node) ||
	    !extent_buffer_uptodate(chunk_root->node)) {
		btrfs_err(fs_info, "failed to read chunk root");
		if (!IS_ERR(chunk_root->node))
			free_extent_buffer(chunk_root->node);
		chunk_root->node = NULL;
		goto fail_tree_roots;
	}
	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
	chunk_root->commit_root = btrfs_root_node(chunk_root);

	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
			   btrfs_header_chunk_tree_uuid(chunk_root->node),
			   BTRFS_UUID_SIZE);

	ret = btrfs_read_chunk_tree(chunk_root);
	if (ret) {
		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
		goto fail_tree_roots;
	}

	/*
	 * keep the device that is marked to be the target device for the
	 * dev_replace procedure
	 */
	btrfs_close_extra_devices(fs_devices, 0);

	if (!fs_devices->latest_bdev) {
		btrfs_err(fs_info, "failed to read devices");
		goto fail_tree_roots;
	}
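	/*
	 * Everything from the retry_root_backup label below up to the
	 * btrfs_read_roots() call is retried with progressively older
	 * backup roots (see next_root_backup()) if reading a root fails
	 * and the usebackuproot mount option is set; see
	 * recovery_tree_root at the bottom of this function.
	 */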
retry_root_backup:
	generation = btrfs_super_generation(disk_super);

	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
					  generation);
	if (IS_ERR(tree_root->node) ||
	    !extent_buffer_uptodate(tree_root->node)) {
		btrfs_warn(fs_info, "failed to read tree root");
		if (!IS_ERR(tree_root->node))
			free_extent_buffer(tree_root->node);
		tree_root->node = NULL;
		goto recovery_tree_root;
	}

	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
	tree_root->commit_root = btrfs_root_node(tree_root);
	btrfs_set_root_refs(&tree_root->root_item, 1);

	mutex_lock(&tree_root->objectid_mutex);
	ret = btrfs_find_highest_objectid(tree_root,
					  &tree_root->highest_objectid);
	if (ret) {
		mutex_unlock(&tree_root->objectid_mutex);
		goto recovery_tree_root;
	}

	ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&tree_root->objectid_mutex);

	ret = btrfs_read_roots(fs_info, tree_root);
	if (ret)
		goto recovery_tree_root;

	fs_info->generation = generation;
	fs_info->last_trans_committed = generation;

	ret = btrfs_recover_balance(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to recover balance: %d", ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_stats(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_replace(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
		goto fail_block_groups;
	}

	btrfs_close_extra_devices(fs_devices, 1);

	ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
	if (ret) {
		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
			  ret);
		goto fail_block_groups;
	}

	ret = btrfs_sysfs_add_device(fs_devices);
	if (ret) {
		btrfs_err(fs_info, "failed to init sysfs device interface: %d",
			  ret);
		goto fail_fsdev_sysfs;
	}

	ret = btrfs_sysfs_add_mounted(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
		goto fail_fsdev_sysfs;
	}

	ret = btrfs_init_space_info(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
		goto fail_sysfs;
	}

	ret = btrfs_read_block_groups(fs_info->extent_root);
	if (ret) {
		btrfs_err(fs_info, "failed to read block groups: %d", ret);
		goto fail_sysfs;
	}
	fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
	if (fs_info->fs_devices->missing_devices >
	    fs_info->num_tolerated_disk_barrier_failures &&
	    !(sb->s_flags & MS_RDONLY)) {
		btrfs_warn(fs_info,
			   "missing devices (%llu) exceeds the limit (%d), writeable mount is not allowed",
			   fs_info->fs_devices->missing_devices,
			   fs_info->num_tolerated_disk_barrier_failures);
		goto fail_sysfs;
	}

	/*
	 * Hold the cleaner_mutex here so that we don't block for a long
	 * time on btrfs_recover_relocation. cleaner_kthread will wait
	 * for us to finish mounting the filesystem.
	 */
	mutex_lock(&fs_info->cleaner_mutex);
	cleaner_mutex_locked = true;
	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
					       "btrfs-cleaner");
	if (IS_ERR(fs_info->cleaner_kthread))
		goto fail_sysfs;

	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   tree_root,
						   "btrfs-transaction");
	if (IS_ERR(fs_info->transaction_kthread))
		goto fail_cleaner;

	if (!btrfs_test_opt(tree_root, SSD) &&
	    !btrfs_test_opt(tree_root, NOSSD) &&
	    !fs_info->fs_devices->rotating) {
		btrfs_info(fs_info, "detected SSD devices, enabling SSD mode");
		btrfs_set_opt(fs_info->mount_opt, SSD);
	}

	/*
	 * Mount does not set all options immediately, we can do it now and do
	 * not have to wait for transaction commit
	 */
	btrfs_apply_pending_changes(fs_info);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
		ret = btrfsic_mount(tree_root, fs_devices,
				    btrfs_test_opt(tree_root,
					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
				    1 : 0,
				    fs_info->check_integrity_print_mask);
		if (ret)
			btrfs_warn(fs_info,
				   "failed to initialize integrity check module: %d",
				   ret);
	}
#endif
	ret = btrfs_read_qgroup_config(fs_info);
	if (ret)
		goto fail_trans_kthread;

	/* Do not make disk changes in a broken FS or when nologreplay is given. */
	if (btrfs_super_log_root(disk_super) != 0 &&
	    !btrfs_test_opt(tree_root, NOLOGREPLAY)) {
		ret = btrfs_replay_log(fs_info, fs_devices);
		if (ret) {
			err = ret;
			goto fail_qgroup;
		}
	}

	ret = btrfs_find_orphan_roots(tree_root);
	if (ret)
		goto fail_qgroup;

	if (!(sb->s_flags & MS_RDONLY)) {
		ret = btrfs_cleanup_fs_roots(fs_info);
		if (ret)
			goto fail_qgroup;
		/* We locked cleaner_mutex before creating cleaner_kthread. */
		ret = btrfs_recover_relocation(tree_root);
		if (ret < 0) {
			btrfs_warn(fs_info, "failed to recover relocation: %d",
				   ret);
			err = -EINVAL;
			goto fail_qgroup;
		}
	}
	mutex_unlock(&fs_info->cleaner_mutex);
	cleaner_mutex_locked = false;

	location.objectid = BTRFS_FS_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = 0;

	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
	if (IS_ERR(fs_info->fs_root)) {
		err = PTR_ERR(fs_info->fs_root);
		goto fail_qgroup;
	}

	if (sb->s_flags & MS_RDONLY)
		return 0;

	if (btrfs_test_opt(tree_root, FREE_SPACE_TREE) &&
	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		btrfs_info(fs_info, "creating free space tree");
		ret = btrfs_create_free_space_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to create free space tree: %d", ret);
			close_ctree(tree_root);
			return ret;
		}
	}

	down_read(&fs_info->cleanup_work_sem);
	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
		up_read(&fs_info->cleanup_work_sem);
		close_ctree(tree_root);
		return ret;
	}
	up_read(&fs_info->cleanup_work_sem);

	ret = btrfs_resume_balance_async(fs_info);
	if (ret) {
		btrfs_warn(fs_info, "failed to resume balance: %d", ret);
		close_ctree(tree_root);
		return ret;
	}

	ret = btrfs_resume_dev_replace_async(fs_info);
	if (ret) {
		btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
		close_ctree(tree_root);
		return ret;
	}

	btrfs_qgroup_rescan_resume(fs_info);

	if (btrfs_test_opt(tree_root, CLEAR_CACHE) &&
	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		btrfs_info(fs_info, "clearing free space tree");
		ret = btrfs_clear_free_space_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to clear free space tree: %d", ret);
			close_ctree(tree_root);
			return ret;
		}
	}

	if (!fs_info->uuid_root) {
		btrfs_info(fs_info, "creating UUID tree");
		ret = btrfs_create_uuid_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to create the UUID tree: %d", ret);
			close_ctree(tree_root);
			return ret;
		}
	} else if (btrfs_test_opt(tree_root, RESCAN_UUID_TREE) ||
		   fs_info->generation !=
		   btrfs_super_uuid_tree_generation(disk_super)) {
		btrfs_info(fs_info, "checking UUID tree");
		ret = btrfs_check_uuid_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to check the UUID tree: %d", ret);
			close_ctree(tree_root);
			return ret;
		}
	} else {
		fs_info->update_uuid_tree_gen = 1;
	}

	fs_info->open = 1;

	/*
	 * backuproot only affects mount behavior; if open_ctree succeeded,
	 * there is no need to keep the flag
	 */
	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);

	return 0;

fail_qgroup:
	btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
	kthread_stop(fs_info->transaction_kthread);
	btrfs_cleanup_transaction(fs_info->tree_root);
	btrfs_free_fs_roots(fs_info);
fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);

	/*
	 * make sure we're done with the btree inode before we stop our
	 * kthreads
	 */
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

fail_sysfs:
	if (cleaner_mutex_locked) {
		mutex_unlock(&fs_info->cleaner_mutex);
		cleaner_mutex_locked = false;
	}
	btrfs_sysfs_remove_mounted(fs_info);

fail_fsdev_sysfs:
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

fail_block_groups:
	btrfs_put_block_group_cache(fs_info);
	btrfs_free_block_groups(fs_info);

fail_tree_roots:
	free_root_pointers(fs_info, 1);
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_sb_buffer:
	btrfs_stop_all_workers(fs_info);
fail_alloc:
fail_iput:
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	iput(fs_info->btree_inode);
fail_bio_counter:
	percpu_counter_destroy(&fs_info->bio_counter);
fail_delalloc_bytes:
	percpu_counter_destroy(&fs_info->delalloc_bytes);
fail_dirty_metadata_bytes:
	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
fail_bdi:
	bdi_destroy(&fs_info->bdi);
fail_srcu:
	cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_close_devices(fs_info->fs_devices);
	return err;

recovery_tree_root:
	if (!btrfs_test_opt(tree_root, USEBACKUPROOT))
		goto fail_tree_roots;

	free_root_pointers(fs_info, 0);

	/* don't use the log in recovery mode, it won't be valid */
	btrfs_set_super_log_root(disk_super, 0);

	/* we can't trust the free space cache either */
	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);

	ret = next_root_backup(fs_info, fs_info->super_copy,
			       &num_backups_tried, &backup_index);
	if (ret == -1)
		goto fail_block_groups;
	goto retry_root_backup;
}

static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		struct btrfs_device *device = (struct btrfs_device *)
			bh->b_private;

		btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
				     "lost page write due to IO error on %s",
				     rcu_str_deref(device->name));
		/*
		 * note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
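/*
 * Super block copies live at the fixed offsets returned by
 * btrfs_sb_offset(): copy 0 at 64KiB and the mirrors further out
 * (64MiB and 256GiB with the current BTRFS_SUPER_MIRROR_SHIFT). Copies
 * that would fall beyond the end of the device are skipped by the size
 * check below.
 */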
int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
			     struct buffer_head **bh_ret)
{
	struct buffer_head *bh;
	struct btrfs_super_block *super;
	u64 bytenr;

	bytenr = btrfs_sb_offset(copy_num);
	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
		return -EINVAL;

	bh = __bread(bdev, bytenr / 4096, BTRFS_SUPER_INFO_SIZE);
	/*
	 * If we fail to read from the underlying devices, as of now
	 * the best option we have is to mark it EIO.
	 */
	if (!bh)
		return -EIO;

	super = (struct btrfs_super_block *)bh->b_data;
	if (btrfs_super_bytenr(super) != bytenr ||
	    btrfs_super_magic(super) != BTRFS_MAGIC) {
		brelse(bh);
		return -EINVAL;
	}

	*bh_ret = bh;
	return 0;
}


struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
	struct buffer_head *bh;
	struct buffer_head *latest = NULL;
	struct btrfs_super_block *super;
	int i;
	u64 transid = 0;
	int ret = -EINVAL;

	/*
	 * We would like to check all the supers, but that would make a btrfs
	 * mount succeed after a mkfs from a different FS. So, we need to add
	 * a special mount option to scan for later supers, using
	 * BTRFS_SUPER_MIRROR_MAX instead.
	 */
	for (i = 0; i < 1; i++) {
		ret = btrfs_read_dev_one_super(bdev, i, &bh);
		if (ret)
			continue;

		super = (struct btrfs_super_block *)bh->b_data;

		if (!latest || btrfs_super_generation(super) > transid) {
			brelse(latest);
			latest = bh;
			transid = btrfs_super_generation(super);
		} else {
			brelse(bh);
		}
	}

	if (!latest)
		return ERR_PTR(ret);

	return latest;
}
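/*
 * Typical use of write_dev_supers() below (mirroring write_all_supers()
 * further down): one pass to submit and one pass to wait, with the same
 * max_mirrors on both:
 *
 *	ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
 *	...
 *	ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
 */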
/*
 * This should be called twice, once with wait == 0 and once with
 * wait == 1. When wait == 0 is done, all the buffer heads we write are
 * pinned.
 *
 * They are released when wait == 1 is done.
 * max_mirrors must be the same for both runs, and it indicates how
 * many supers on this one device should be written.
 *
 * max_mirrors == 0 means to write them all.
 */
static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb,
			    int do_barriers, int wait, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int ret;
	int errors = 0;
	u32 crc;
	u64 bytenr;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    device->commit_total_bytes)
			break;

		if (wait) {
			bh = __find_get_block(device->bdev, bytenr / 4096,
					      BTRFS_SUPER_INFO_SIZE);
			if (!bh) {
				errors++;
				continue;
			}
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				errors++;

			/* drop our reference */
			brelse(bh);

			/* drop the reference from the wait == 0 run */
			brelse(bh);
			continue;
		} else {
			btrfs_set_super_bytenr(sb, bytenr);

			crc = ~(u32)0;
			crc = btrfs_csum_data((char *)sb +
					      BTRFS_CSUM_SIZE, crc,
					      BTRFS_SUPER_INFO_SIZE -
					      BTRFS_CSUM_SIZE);
			btrfs_csum_final(crc, sb->csum);

			/*
			 * one reference for us, and we leave it for the
			 * caller
			 */
			bh = __getblk(device->bdev, bytenr / 4096,
				      BTRFS_SUPER_INFO_SIZE);
			if (!bh) {
				btrfs_err(device->dev_root->fs_info,
					  "couldn't get super buffer head for bytenr %llu",
					  bytenr);
				errors++;
				continue;
			}

			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

			/* one reference for submit_bh */
			get_bh(bh);

			set_buffer_uptodate(bh);
			lock_buffer(bh);
			bh->b_end_io = btrfs_end_buffer_write_sync;
			bh->b_private = device;
		}

		/*
		 * We FUA the first super; the others we allow to go down
		 * lazily.
		 */
		if (i == 0)
			ret = btrfsic_submit_bh(WRITE_FUA, bh);
		else
			ret = btrfsic_submit_bh(WRITE_SYNC, bh);
		if (ret)
			errors++;
	}
	return errors < i ? 0 : -1;
}

/*
 * endio for the write_dev_flush, this will wake anyone waiting
 * for the barrier when it is done
 */
static void btrfs_end_empty_barrier(struct bio *bio)
{
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}
/*
 * Trigger flushes for one of the devices. If you pass wait == 0, the
 * flushes are only sent down. With wait == 1, it waits for the previous
 * flush.
 *
 * Any device where the flush fails with eopnotsupp is flagged as
 * not-barrier capable.
 */
static int write_dev_flush(struct btrfs_device *device, int wait)
{
	struct bio *bio;
	int ret = 0;

	if (device->nobarriers)
		return 0;

	if (wait) {
		bio = device->flush_bio;
		if (!bio)
			return 0;

		wait_for_completion(&device->flush_wait);

		if (bio->bi_error) {
			ret = bio->bi_error;
			btrfs_dev_stat_inc_and_print(device,
						     BTRFS_DEV_STAT_FLUSH_ERRS);
		}

		/* drop the reference from the wait == 0 run */
		bio_put(bio);
		device->flush_bio = NULL;

		return ret;
	}

	/*
	 * one reference for us, and we leave it for the
	 * caller
	 */
	device->flush_bio = NULL;
	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		return -ENOMEM;

	bio->bi_end_io = btrfs_end_empty_barrier;
	bio->bi_bdev = device->bdev;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;
	device->flush_bio = bio;

	bio_get(bio);
	btrfsic_submit_bio(WRITE_FLUSH, bio);

	return 0;
}

/*
 * send an empty flush down to each device in parallel,
 * then wait for them
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
	struct list_head *head;
	struct btrfs_device *dev;
	int errors_send = 0;
	int errors_wait = 0;
	int ret;

	/* send down all the barriers */
	head = &info->fs_devices->devices;
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (dev->missing)
			continue;
		if (!dev->bdev) {
			errors_send++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_flush(dev, 0);
		if (ret)
			errors_send++;
	}

	/* wait for all the barriers */
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (dev->missing)
			continue;
		if (!dev->bdev) {
			errors_wait++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_flush(dev, 1);
		if (ret)
			errors_wait++;
	}
	if (errors_send > info->num_tolerated_disk_barrier_failures ||
	    errors_wait > info->num_tolerated_disk_barrier_failures)
		return -EIO;
	return 0;
}
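/*
 * For example, based on btrfs_raid_array's tolerated_failures: SINGLE,
 * DUP and RAID0 profiles tolerate no failed barriers, RAID1 and RAID10
 * tolerate one, and RAID6 tolerates two. The helpers below take the
 * minimum over all the profiles actually in use.
 */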
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
{
	int raid_type;
	int min_tolerated = INT_MAX;

	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
		min_tolerated = min(min_tolerated,
				    btrfs_raid_array[BTRFS_RAID_SINGLE].
				    tolerated_failures);

	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (raid_type == BTRFS_RAID_SINGLE)
			continue;
		if (!(flags & btrfs_raid_group[raid_type]))
			continue;
		min_tolerated = min(min_tolerated,
				    btrfs_raid_array[raid_type].
				    tolerated_failures);
	}

	if (min_tolerated == INT_MAX) {
		pr_warn("BTRFS: unknown raid flag: %llu\n", flags);
		min_tolerated = 0;
	}

	return min_tolerated;
}

int btrfs_calc_num_tolerated_disk_barrier_failures(
	struct btrfs_fs_info *fs_info)
{
	struct btrfs_ioctl_space_info space;
	struct btrfs_space_info *sinfo;
	u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
		       BTRFS_BLOCK_GROUP_SYSTEM,
		       BTRFS_BLOCK_GROUP_METADATA,
		       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
	int i;
	int c;
	int num_tolerated_disk_barrier_failures =
		(int)fs_info->fs_devices->num_devices;

	for (i = 0; i < ARRAY_SIZE(types); i++) {
		struct btrfs_space_info *tmp;

		sinfo = NULL;
		rcu_read_lock();
		list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
			if (tmp->flags == types[i]) {
				sinfo = tmp;
				break;
			}
		}
		rcu_read_unlock();

		if (!sinfo)
			continue;

		down_read(&sinfo->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			u64 flags;

			if (list_empty(&sinfo->block_groups[c]))
				continue;

			btrfs_get_block_group_info(&sinfo->block_groups[c],
						   &space);
			if (space.total_bytes == 0 || space.used_bytes == 0)
				continue;
			flags = space.flags;

			num_tolerated_disk_barrier_failures = min(
				num_tolerated_disk_barrier_failures,
				btrfs_get_num_tolerated_disk_barrier_failures(
					flags));
		}
		up_read(&sinfo->groups_sem);
	}

	return num_tolerated_disk_barrier_failures;
}

static int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	do_barriers = !btrfs_test_opt(root, NOBARRIER);
	backup_super_roots(root->fs_info);

	sb = root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	head = &root->fs_info->fs_devices->devices;
	max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;

	if (do_barriers) {
		ret = barrier_all_devices(root->fs_info);
		if (ret) {
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			btrfs_handle_fs_error(root->fs_info, ret,
					      "errors while submitting device barriers.");
			return ret;
		}
	}

	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item,
						   dev->commit_total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item,
						  dev->commit_bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		btrfs_err(root->fs_info, "%d errors while writing supers",
			  total_errors);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

		/* FUA is masked off if unsupported and can't be the reason */
		btrfs_handle_fs_error(root->fs_info, -EIO,
				      "%d errors while writing supers",
				      total_errors);
		return -EIO;
	}

	total_errors = 0;
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
		if (ret)
			total_errors++;
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
	if (total_errors > max_errors) {
		btrfs_handle_fs_error(root->fs_info, -EIO,
				      "%d errors while writing supers",
				      total_errors);
		return -EIO;
	}
	return 0;
}

int write_ctree_super(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, int max_mirrors)
{
	return write_all_supers(root, max_mirrors);
}

/* Drop a fs root from the radix tree and free it. */
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
				 struct btrfs_root *root)
{
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);

	if (btrfs_root_refs(&root->root_item) == 0)
		synchronize_srcu(&fs_info->subvol_srcu);

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		btrfs_free_log(NULL, root);

	if (root->free_ino_pinned)
		__btrfs_remove_free_space_cache(root->free_ino_pinned);
	if (root->free_ino_ctl)
		__btrfs_remove_free_space_cache(root->free_ino_ctl);
	free_fs_root(root);
}

static void free_fs_root(struct btrfs_root *root)
{
	iput(root->ino_cache_inode);
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
	btrfs_free_block_rsv(root, root->orphan_block_rsv);
	root->orphan_block_rsv = NULL;
	if (root->anon_dev)
		free_anon_bdev(root->anon_dev);
	if (root->subv_writers)
		btrfs_free_subvolume_writers(root->subv_writers);
	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	kfree(root->name);
	btrfs_put_fs_root(root);
}

void btrfs_free_fs_root(struct btrfs_root *root)
{
	free_fs_root(root);
}

int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i = 0;
	int err = 0;
	unsigned int ret = 0;
	int index;

	while (1) {
		index = srcu_read_lock(&fs_info->subvol_srcu);
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			break;
		}
		root_objectid = gang[ret - 1]->root_key.objectid + 1;

		for (i = 0; i < ret; i++) {
			/* Avoid grabbing roots in dead_roots */
			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
				gang[i] = NULL;
				continue;
			}
			/* grab all the search results for later use */
			gang[i] = btrfs_grab_fs_root(gang[i]);
		}
		srcu_read_unlock(&fs_info->subvol_srcu, index);

		for (i = 0; i < ret; i++) {
			if (!gang[i])
				continue;
			root_objectid = gang[i]->root_key.objectid;
			err = btrfs_orphan_cleanup(gang[i]);
			if (err)
				break;
			btrfs_put_fs_root(gang[i]);
		}
		root_objectid++;
	}

	/* release the uncleaned roots due to error */
	for (; i < ret; i++) {
		if (gang[i])
			btrfs_put_fs_root(gang[i]);
	}
	return err;
}

int btrfs_commit_super(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);
	wake_up_process(root->fs_info->cleaner_kthread);

	/* wait until the ongoing cleanup work is done */
	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans, root);
}

void close_ctree(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	fs_info->closing = 1;
	smp_mb();

	/* wait for the qgroup rescan worker to stop */
	btrfs_qgroup_wait_for_completion(fs_info);

	/* wait for the uuid_scan task to finish */
	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complaints from lockdep et al., set sem back to initial state */
	up(&fs_info->uuid_tree_rescan_sem);

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	cancel_work_sync(&fs_info->async_reclaim_work);

	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
		/*
		 * If the cleaner thread is stopped and there are
		 * block groups queued for removal, the deletion will be
		 * skipped when we quit the cleaner thread.
		 */
		btrfs_delete_unused_bgs(root->fs_info);

		ret = btrfs_commit_super(root);
		if (ret)
			btrfs_err(fs_info, "commit super ret %d", ret);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		btrfs_error_commit_super(root);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	fs_info->closing = 2;
	smp_mb();

	btrfs_free_qgroup_config(fs_info);

	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
		btrfs_info(fs_info, "at unmount delalloc count %lld",
			   percpu_counter_sum(&fs_info->delalloc_bytes));
	}

	btrfs_sysfs_remove_mounted(fs_info);
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

	btrfs_free_fs_roots(fs_info);

	btrfs_put_block_group_cache(fs_info);

	btrfs_free_block_groups(fs_info);

	/*
	 * we must make sure there are no read requests to
	 * submit after we stop all workers.
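	 *
	 * Invalidating the btree inode pages below drops the cached
	 * extent buffers, so no new read bios can be queued once the
	 * workers are gone.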
	 */
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
	btrfs_stop_all_workers(fs_info);

	fs_info->open = 0;
	free_root_pointers(fs_info, 1);

	iput(fs_info->btree_inode);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(root, CHECK_INTEGRITY))
		btrfsic_unmount(root, fs_info->fs_devices);
#endif

	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	percpu_counter_destroy(&fs_info->bio_counter);
	bdi_destroy(&fs_info->bdi);
	cleanup_srcu_struct(&fs_info->subvol_srcu);

	btrfs_free_stripe_hash_table(fs_info);

	__btrfs_free_block_rsv(root->orphan_block_rsv);
	root->orphan_block_rsv = NULL;

	lock_chunks(root);
	while (!list_empty(&fs_info->pinned_chunks)) {
		struct extent_map *em;

		em = list_first_entry(&fs_info->pinned_chunks,
				      struct extent_map, list);
		list_del_init(&em->list);
		free_extent_map(em);
	}
	unlock_chunks(root);
}

int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
	struct inode *btree_inode = buf->pages[0]->mapping->host;

	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/*
	 * This is a fast path so only do this check if we have sanity tests
	 * enabled. Normal people shouldn't be marking dummy buffers as dirty
	 * outside of the sanity tests.
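	 *
	 * (Dummy buffers are not attached to the btree inode, so the
	 * root lookup below would not be valid for them.)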
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
		return;
#endif
	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	btrfs_assert_tree_locked(buf);
	if (transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
		     "found %llu running %llu\n",
		     buf->start, transid, root->fs_info->generation);
	was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty)
		__percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
				     buf->len,
				     root->fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
		btrfs_print_leaf(root, buf);
		ASSERT(0);
	}
#endif
}

static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
					int flush_delayed)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code; they end up stuck in balance_dirty_pages forever
	 */
	int ret;

	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(root);

	ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
				     BTRFS_DIRTY_METADATA_THRESH);
	if (ret > 0) {
		balance_dirty_pages_ratelimited(
				root->fs_info->btree_inode->i_mapping);
	}
}

void btrfs_btree_balance_dirty(struct btrfs_root *root)
{
	__btrfs_btree_balance_dirty(root, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
{
	__btrfs_btree_balance_dirty(root, 0);
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}

static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				   int read_only)
{
	struct btrfs_super_block *sb = fs_info->super_copy;
	u64 nodesize = btrfs_super_nodesize(sb);
	u64 sectorsize = btrfs_super_sectorsize(sb);
	int ret = 0;

	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
		printk(KERN_ERR "BTRFS: no valid FS found\n");
		ret = -EINVAL;
	}
	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
		printk(KERN_WARNING "BTRFS: unrecognized super flag: %llu\n",
		       btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
		printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
		       btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
		printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
		       btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
		printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
		       btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}

	/*
	 * Check sectorsize and nodesize first; the other checks will need
	 * them. Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
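	 *
	 * Only sectorsize == PAGE_SIZE is actually accepted further
	 * below; the wider list is what the on-disk format allows.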
	 */
	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		printk(KERN_ERR "BTRFS: invalid sectorsize %llu\n", sectorsize);
		ret = -EINVAL;
	}
	/* Only PAGE_SIZE is supported for now */
	if (sectorsize != PAGE_SIZE) {
		printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only %lu is supported\n",
		       sectorsize, PAGE_SIZE);
		ret = -EINVAL;
	}
	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		printk(KERN_ERR "BTRFS: invalid nodesize %llu\n", nodesize);
		ret = -EINVAL;
	}
	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
		printk(KERN_ERR "BTRFS: invalid leafsize %u, should be %llu\n",
		       le32_to_cpu(sb->__unused_leafsize),
		       nodesize);
		ret = -EINVAL;
	}

	/* Root alignment check */
	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
		printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
		       btrfs_super_root(sb));
		ret = -EINVAL;
	}
	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
		printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
		       btrfs_super_chunk_root(sb));
		ret = -EINVAL;
	}
	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
		printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
		       btrfs_super_log_root(sb));
		ret = -EINVAL;
	}

	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
		printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
		       fs_info->fsid, sb->dev_item.fsid);
		ret = -EINVAL;
	}

	/*
	 * Hint to catch really bogus numbers, bitflips or so; more exact
	 * checks are done later
	 */
	if (btrfs_super_num_devices(sb) > (1UL << 31))
		printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
		       btrfs_super_num_devices(sb));
	if (btrfs_super_num_devices(sb) == 0) {
		printk(KERN_ERR "BTRFS: number of devices is 0\n");
		ret = -EINVAL;
	}

	if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
		printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
		       btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
		ret = -EINVAL;
	}

	/*
	 * Obvious sys_chunk_array corruption checks: the array must hold at
	 * least one key and one chunk
	 */
	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n",
		       btrfs_super_sys_array_size(sb),
		       BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
		ret = -EINVAL;
	}
	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
			+ sizeof(struct btrfs_chunk)) {
		printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n",
		       btrfs_super_sys_array_size(sb),
		       sizeof(struct btrfs_disk_key)
		       + sizeof(struct btrfs_chunk));
		ret = -EINVAL;
	}

	/*
	 * The generation is a global counter; we'll trust it more than the
	 * others, but it's still possible that it's the one that's wrong.
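	 *
	 * Note that the generation checks below only warn; they do not
	 * fail the mount by themselves.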
	 */
	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
		printk(KERN_WARNING
		       "BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
		       btrfs_super_generation(sb),
		       btrfs_super_chunk_root_generation(sb));
	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
	    && btrfs_super_cache_generation(sb) != (u64)-1)
		printk(KERN_WARNING
		       "BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
		       btrfs_super_generation(sb),
		       btrfs_super_cache_generation(sb));

	return ret;
}

static void btrfs_error_commit_super(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	/* cleanup the FS via a transaction */
	btrfs_cleanup_transaction(root);
}

static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&root->ordered_extent_lock);
	/*
	 * This will just short-circuit the ordered completion stuff, which
	 * will make sure the ordered extent gets properly cleaned up.
	 */
	list_for_each_entry(ordered, &root->ordered_extents,
			    root_extent_list)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
	spin_unlock(&root->ordered_extent_lock);
}

static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);

		spin_unlock(&fs_info->ordered_root_lock);
		btrfs_destroy_ordered_extents(root);

		cond_resched();
		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);
}

static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (atomic_read(&delayed_refs->num_entries) == 0) {
		spin_unlock(&delayed_refs->lock);
		btrfs_info(root->fs_info, "delayed_refs has no entries");
		return ret;
	}

	while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;
		struct btrfs_delayed_ref_node *tmp;
		bool pin_bytes = false;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			spin_lock(&delayed_refs->lock);
			continue;
		}
		spin_lock(&head->lock);
		list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
						 list) {
			ref->in_tree = 0;
			list_del(&ref->list);
			atomic_dec(&delayed_refs->num_entries);
			btrfs_put_delayed_ref(ref);
		}
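		/*
		 * If this head reserved space for an extent item that was
		 * never inserted, pin those bytes so the space is
		 * accounted for and released like any other pinned extent.
		 */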
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		delayed_refs->num_heads--;
		if (head->processing == 0)
			delayed_refs->num_heads_ready--;
		atomic_dec(&delayed_refs->num_entries);
		head->node.in_tree = 0;
		rb_erase(&head->href_node, &delayed_refs->href_root);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);

		if (pin_bytes)
			btrfs_pin_extent(root, head->node.bytenr,
					 head->node.num_bytes, 1);
		btrfs_put_delayed_ref(&head->node);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}

	spin_unlock(&delayed_refs->lock);

	return ret;
}

static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);

		list_del_init(&btrfs_inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &btrfs_inode->runtime_flags);
		spin_unlock(&root->delalloc_lock);

		btrfs_invalidate_inodes(btrfs_inode->root);

		spin_lock(&root->delalloc_lock);
	}

	spin_unlock(&root->delalloc_lock);
}

static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		list_del_init(&root->delalloc_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}

static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark, NULL);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark);
		while (start <= end) {
			eb = btrfs_find_tree_block(root->fs_info, start);
			start += root->nodesize;
			if (!eb)
				continue;
			wait_on_extent_buffer_writeback(eb);

			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
					       &eb->bflags))
				clear_extent_buffer_dirty(eb);
			free_extent_buffer_stale(eb);
		}
	}

	return ret;
}

static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents)
{
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;
	bool loop = true;

	unpin = pinned_extents;
again:
	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			break;

		clear_extent_dirty(unpin, start, end);
		btrfs_error_unpin_extent_range(root, start, end);
		cond_resched();
	}

	if (loop) {
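		/*
		 * pinned_extents points at one of the two
		 * fs_info->freed_extents[] trees, and the two are swapped
		 * at transaction commit; make a second pass to drain the
		 * other tree as well.
		 */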
		if (unpin == &root->fs_info->freed_extents[0])
			unpin = &root->fs_info->freed_extents[1];
		else
			unpin = &root->fs_info->freed_extents[0];
		loop = false;
		goto again;
	}

	return 0;
}

void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_root *root)
{
	btrfs_destroy_delayed_refs(cur_trans, root);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&root->fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&root->fs_info->transaction_wait);

	btrfs_destroy_delayed_inodes(root);
	btrfs_assert_delayed_root_empty(root);

	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(root,
				    root->fs_info->pinned_extents);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	/*
	memset(cur_trans, 0, sizeof(*cur_trans));
	kmem_cache_free(btrfs_transaction_cachep, cur_trans);
	*/
}

static int btrfs_cleanup_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *t;

	mutex_lock(&root->fs_info->transaction_kthread_mutex);

	spin_lock(&root->fs_info->trans_lock);
	while (!list_empty(&root->fs_info->trans_list)) {
		t = list_first_entry(&root->fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_START) {
			atomic_inc(&t->use_count);
			spin_unlock(&root->fs_info->trans_lock);
			btrfs_wait_for_commit(root, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&root->fs_info->trans_lock);
			continue;
		}
		if (t == root->fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&root->fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't hold a trans
			 * handle open currently for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, root);

		spin_lock(&root->fs_info->trans_lock);
		if (t == root->fs_info->running_transaction)
			root->fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&root->fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(root);
		spin_lock(&root->fs_info->trans_lock);
	}
	spin_unlock(&root->fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(root->fs_info);
	btrfs_destroy_delayed_inodes(root);
	btrfs_assert_delayed_root_empty(root);
	btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
	btrfs_destroy_all_delalloc_inodes(root->fs_info);
	mutex_unlock(&root->fs_info->transaction_kthread_mutex);

	return 0;
}

static const struct extent_io_ops btree_extent_io_ops = {
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.readpage_io_failed_hook = btree_io_failed_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};