/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete. This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
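 *
 * A rough sketch of the flow as implemented below: btrfs_bio_wq_end_io()
 * saves a bio's original bi_end_io/bi_private in a btrfs_end_io_wq and
 * points bi_end_io at end_workqueue_bio().  When the bio completes,
 * end_workqueue_bio() picks a workqueue based on the IO type and queues
 * end_workqueue_fn(), which restores the saved fields and finally calls
 * bio_endio() from task context.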
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct btrfs_fs_info *fs_info;
	struct bio *bio;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
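 *
 * For example, btrfs_init_lockdep() below names the class for a level-1
 * node of the extent tree "btrfs-extent-01"; roots without a dedicated
 * entry in the keyset table fall back to the "tree" stem.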
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}

/*
 * compute the csum for a btree
 * block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size, GFP_NOFS);
		if (!result)
			return -ENOMEM;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
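	 * (The "under IO" test below is extent_buffer_under_io(); an idle
	 * eb that fails the transid check loses its uptodate bit here and
	 * can then be re-read from disk by the caller.)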
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		const int csum_size = sizeof(crc);
		char result[csum_size];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, csum_size))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
			  csum_type);
		ret = 1;
	}

	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
					  struct extent_buffer *eb,
					  u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret) {
			if (!verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				break;
			else
				ret = -EIO;
		}

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			break;

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(fs_info, eb, failed_mirror);

	return ret;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
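	 * A found_start mismatch means the block header claims a bytenr
	 * other than the page position it was found at; a page that is
	 * not uptodate should never reach the checksumming step at all.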
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}

static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_FSID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
			     found_start, eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d",
			  (int)btrfs_header_level(eb));
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
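	 * (btree_read_extent_buffer_pages() tests EXTENT_BUFFER_CORRUPT
	 * and stops cycling through mirrors once it is set.)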
	 */
	if (found_level == 0 && btrfs_check_leaf_full(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(root, eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, -EIO);
	return -EIO;	/* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
				 enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->thread_pool_size,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	async->submit_bio_done(async->private_data, async->bio, async->mirror_num,
			       async->bio_flags, async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_hook_t *submit_bio_start,
				 extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->fs_info = fs_info;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->status = 0;

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);
	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t __btree_submit_bio_start(void *private_data, struct bio *bio,
					     int mirror_num, unsigned long bio_flags,
					     u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Do the checksumming here; the bio is
	 * mapped and sent down the stack later by __btree_submit_bio_done.
	 */
	return btree_csum_one_bio(bio);
}

static blk_status_t __btree_submit_bio_done(void *private_data, struct bio *bio,
					    int mirror_num, unsigned long bio_flags,
					    u64 bio_offset)
{
	struct inode *inode = private_data;
	blk_status_t ret;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.
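	 * The checksums were already filled in by __btree_submit_bio_start.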
	 * Just jump into btrfs_map_bio
	 */
	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	return ret;
}

static int check_async_write(struct btrfs_inode *bi)
{
	if (atomic_read(&bi->sync_writers))
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(BTRFS_I(inode));
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
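	 * Returning -EAGAIN when the buffers cannot be dropped tells the
	 * migration code to leave this page alone for now and retry later.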
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif


static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			     int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}


int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;

}

void clean_tree_block(struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshotted, 0);
	atomic64_set(&root->qgroup_meta_rsv, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(&root->dirty_log_pages, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
		gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid;

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer_fsid(leaf, fs_info->fsid);
	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
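	 *
	 * Also note that alloc_log_tree() does not insert the new root
	 * into the root tree; the callers below wire it up by hand, via
	 * fs_info->log_root_tree or root->log_root.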
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer_fsid(root->node, fs_info->fsid);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

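	/*
	 * All tree roots are read the same way; everything except the log
	 * trees additionally gets REF_COWS set and its root item checked
	 * and initialized below.
	 */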
	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					&root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* the caller is responsible to call free_fs_root */
	return ret;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}

static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int again;
	struct btrfs_trans_handle *trans;

	do {
		again = 0;

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
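		 * For example, a remount to read-only or a filesystem
		 * error can make btrfs_need_cleaner_sleep() flip to true
		 * after the first check, so re-check it under the mutex.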
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
		btrfs_run_delayed_iputs(fs_info);
		mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);

		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);
sleep:
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());

	/*
	 * Transaction kthread is stopped before us and wakes us up.
	 * However we might have started a new transaction and COWed some
	 * tree blocks when deleting unused block groups for example. So
	 * make sure we commit the transaction we started to have a clean
	 * shutdown when evicting the btree inode - if it has dirty pages
	 * when we do the final iput() on it, eviction will trigger a
	 * writeback for it which will fail with null pointer dereferences
	 * since work queues and other resources were already released and
	 * destroyed by the time the iput/eviction/writeback is made.
	 */
	trans = btrfs_attach_transaction(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			btrfs_err(fs_info,
				  "cleaner transaction attach returned %ld",
				  PTR_ERR(trans));
	} else {
		int ret;

		ret = btrfs_commit_transaction(trans);
		if (ret)
			btrfs_err(fs_info,
				  "cleaner open transaction commit returned %d",
				  ret);
	}

	return 0;
}

static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	unsigned long now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * fs_info->commit_interval;
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		now = get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    (now < cur->start_time ||
		     now - cur->start_time < fs_info->commit_interval)) {
			spin_unlock(&fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail.
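		 * btrfs_attach_transaction() then returns an error other
		 * than -ENOENT, and cannot_commit is set so the sleep
		 * below still honors the full commit interval delay.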
		 */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans);
		} else {
			btrfs_end_transaction(trans);
		}
sleep:
		wake_up_process(fs_info->cleaner_kthread);
		mutex_unlock(&fs_info->transaction_kthread_mutex);

		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
				      &fs_info->fs_state)))
			btrfs_cleanup_transaction(fs_info);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop() &&
				(!btrfs_transaction_blocked(fs_info) ||
				 cannot_commit))
			schedule_timeout(delay);
		__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will find the highest generation in the array of
 * root backups.  The index of the highest array is returned,
 * or -1 if we can't find anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}


/*
 * find the oldest backup so we know where to store new entries
 * in the backup array.  This will set the backup_root_index
 * field in the fs_info struct
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
				     u64 newest_gen)
{
	int newest_index = -1;

	newest_index = find_newest_super_backup(info, newest_gen);
	/* if there was garbage in there, just move along */
	if (newest_index == -1) {
		info->backup_root_index = 0;
	} else {
		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
	}
}

/*
 * copy all the root pointers into the super backup array.
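 * The array lives in the superblock and holds BTRFS_NUM_BACKUP_ROOTS
 * slots that are reused round-robin, one slot per committed generation.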
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	int next_backup;
	struct btrfs_root_backup *root_backup;
	int last_backup;

	next_backup = info->backup_root_index;
	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
		BTRFS_NUM_BACKUP_ROOTS;

	/*
	 * just overwrite the last backup if we're at the same generation
	 * this happens only at umount
	 */
	root_backup = info->super_for_commit->super_roots + last_backup;
	if (btrfs_backup_tree_root_gen(root_backup) ==
	    btrfs_header_generation(info->tree_root->node))
		next_backup = last_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			       btrfs_header_generation(info->tree_root->node));

	btrfs_set_backup_tree_root_level(root_backup,
			       btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			       btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			       btrfs_header_level(info->chunk_root->node));

	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
	btrfs_set_backup_extent_root_gen(root_backup,
			       btrfs_header_generation(info->extent_root->node));
	btrfs_set_backup_extent_root_level(root_backup,
			       btrfs_header_level(info->extent_root->node));

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root.  Make sure it is valid before we fill it in.
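	 * (info->fs_root can still be NULL here, or allocated without its
	 * node having been read yet, hence the two-part test below.)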
1938 */ 1939 if (info->fs_root && info->fs_root->node) { 1940 btrfs_set_backup_fs_root(root_backup, 1941 info->fs_root->node->start); 1942 btrfs_set_backup_fs_root_gen(root_backup, 1943 btrfs_header_generation(info->fs_root->node)); 1944 btrfs_set_backup_fs_root_level(root_backup, 1945 btrfs_header_level(info->fs_root->node)); 1946 } 1947 1948 btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); 1949 btrfs_set_backup_dev_root_gen(root_backup, 1950 btrfs_header_generation(info->dev_root->node)); 1951 btrfs_set_backup_dev_root_level(root_backup, 1952 btrfs_header_level(info->dev_root->node)); 1953 1954 btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start); 1955 btrfs_set_backup_csum_root_gen(root_backup, 1956 btrfs_header_generation(info->csum_root->node)); 1957 btrfs_set_backup_csum_root_level(root_backup, 1958 btrfs_header_level(info->csum_root->node)); 1959 1960 btrfs_set_backup_total_bytes(root_backup, 1961 btrfs_super_total_bytes(info->super_copy)); 1962 btrfs_set_backup_bytes_used(root_backup, 1963 btrfs_super_bytes_used(info->super_copy)); 1964 btrfs_set_backup_num_devices(root_backup, 1965 btrfs_super_num_devices(info->super_copy)); 1966 1967 /* 1968 * if we don't copy this out to the super_copy, it won't get remembered 1969 * for the next commit 1970 */ 1971 memcpy(&info->super_copy->super_roots, 1972 &info->super_for_commit->super_roots, 1973 sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); 1974 } 1975 1976 /* 1977 * this copies info out of the root backup array and back into 1978 * the in-memory super block. It is meant to help iterate through 1979 * the array, so you send it the number of backups you've already 1980 * tried and the last backup index you used. 1981 * 1982 * this returns -1 when it has tried all the backups 1983 */ 1984 static noinline int next_root_backup(struct btrfs_fs_info *info, 1985 struct btrfs_super_block *super, 1986 int *num_backups_tried, int *backup_index) 1987 { 1988 struct btrfs_root_backup *root_backup; 1989 int newest = *backup_index; 1990 1991 if (*num_backups_tried == 0) { 1992 u64 gen = btrfs_super_generation(super); 1993 1994 newest = find_newest_super_backup(info, gen); 1995 if (newest == -1) 1996 return -1; 1997 1998 *backup_index = newest; 1999 *num_backups_tried = 1; 2000 } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) { 2001 /* we've tried all the backups, all done */ 2002 return -1; 2003 } else { 2004 /* jump to the next oldest backup */ 2005 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) % 2006 BTRFS_NUM_BACKUP_ROOTS; 2007 *backup_index = newest; 2008 *num_backups_tried += 1; 2009 } 2010 root_backup = super->super_roots + newest; 2011 2012 btrfs_set_super_generation(super, 2013 btrfs_backup_tree_root_gen(root_backup)); 2014 btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); 2015 btrfs_set_super_root_level(super, 2016 btrfs_backup_tree_root_level(root_backup)); 2017 btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); 2018 2019 /* 2020 * fixme: the total bytes and num_devices need to match or we should 2021 * need a fsck 2022 */ 2023 btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); 2024 btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); 2025 return 0; 2026 } 2027 2028 /* helper to cleanup workers */ 2029 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) 2030 { 2031 btrfs_destroy_workqueue(fs_info->fixup_workers); 2032 btrfs_destroy_workqueue(fs_info->delalloc_workers); 2033 
btrfs_destroy_workqueue(fs_info->workers); 2034 btrfs_destroy_workqueue(fs_info->endio_workers); 2035 btrfs_destroy_workqueue(fs_info->endio_raid56_workers); 2036 btrfs_destroy_workqueue(fs_info->endio_repair_workers); 2037 btrfs_destroy_workqueue(fs_info->rmw_workers); 2038 btrfs_destroy_workqueue(fs_info->endio_write_workers); 2039 btrfs_destroy_workqueue(fs_info->endio_freespace_worker); 2040 btrfs_destroy_workqueue(fs_info->submit_workers); 2041 btrfs_destroy_workqueue(fs_info->delayed_workers); 2042 btrfs_destroy_workqueue(fs_info->caching_workers); 2043 btrfs_destroy_workqueue(fs_info->readahead_workers); 2044 btrfs_destroy_workqueue(fs_info->flush_workers); 2045 btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); 2046 btrfs_destroy_workqueue(fs_info->extent_workers); 2047 /* 2048 * Now that all other work queues are destroyed, we can safely destroy 2049 * the queues used for metadata I/O, since tasks from those other work 2050 * queues can do metadata I/O operations. 2051 */ 2052 btrfs_destroy_workqueue(fs_info->endio_meta_workers); 2053 btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); 2054 } 2055 2056 static void free_root_extent_buffers(struct btrfs_root *root) 2057 { 2058 if (root) { 2059 free_extent_buffer(root->node); 2060 free_extent_buffer(root->commit_root); 2061 root->node = NULL; 2062 root->commit_root = NULL; 2063 } 2064 } 2065 2066 /* helper to cleanup tree roots */ 2067 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) 2068 { 2069 free_root_extent_buffers(info->tree_root); 2070 2071 free_root_extent_buffers(info->dev_root); 2072 free_root_extent_buffers(info->extent_root); 2073 free_root_extent_buffers(info->csum_root); 2074 free_root_extent_buffers(info->quota_root); 2075 free_root_extent_buffers(info->uuid_root); 2076 if (chunk_root) 2077 free_root_extent_buffers(info->chunk_root); 2078 free_root_extent_buffers(info->free_space_root); 2079 } 2080 2081 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) 2082 { 2083 int ret; 2084 struct btrfs_root *gang[8]; 2085 int i; 2086 2087 while (!list_empty(&fs_info->dead_roots)) { 2088 gang[0] = list_entry(fs_info->dead_roots.next, 2089 struct btrfs_root, root_list); 2090 list_del(&gang[0]->root_list); 2091 2092 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) { 2093 btrfs_drop_and_free_fs_root(fs_info, gang[0]); 2094 } else { 2095 free_extent_buffer(gang[0]->node); 2096 free_extent_buffer(gang[0]->commit_root); 2097 btrfs_put_fs_root(gang[0]); 2098 } 2099 } 2100 2101 while (1) { 2102 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, 2103 (void **)gang, 0, 2104 ARRAY_SIZE(gang)); 2105 if (!ret) 2106 break; 2107 for (i = 0; i < ret; i++) 2108 btrfs_drop_and_free_fs_root(fs_info, gang[i]); 2109 } 2110 2111 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { 2112 btrfs_free_log_root_tree(NULL, fs_info); 2113 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); 2114 } 2115 } 2116 2117 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) 2118 { 2119 mutex_init(&fs_info->scrub_lock); 2120 atomic_set(&fs_info->scrubs_running, 0); 2121 atomic_set(&fs_info->scrub_pause_req, 0); 2122 atomic_set(&fs_info->scrubs_paused, 0); 2123 atomic_set(&fs_info->scrub_cancel_req, 0); 2124 init_waitqueue_head(&fs_info->scrub_pause_wait); 2125 fs_info->scrub_workers_refcnt = 0; 2126 } 2127 2128 static void btrfs_init_balance(struct btrfs_fs_info *fs_info) 2129 { 2130 spin_lock_init(&fs_info->balance_lock); 2131 mutex_init(&fs_info->balance_mutex); 2132 
	atomic_set(&fs_info->balance_running, 0);
	atomic_set(&fs_info->balance_pause_req, 0);
	atomic_set(&fs_info->balance_cancel_req, 0);
	fs_info->balance_ctl = NULL;
	init_waitqueue_head(&fs_info->balance_wait_q);
}

static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
{
	struct inode *inode = fs_info->btree_inode;

	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
	set_nlink(inode, 1);
	/*
	 * we set the i_size on the btree inode to the largest possible
	 * offset.  the real end of the address space is determined by all
	 * of the devices in the system
	 */
	inode->i_size = OFFSET_MAX;
	inode->i_mapping->a_ops = &btree_aops;

	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
	BTRFS_I(inode)->io_tree.track_uptodate = 0;
	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);

	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;

	BTRFS_I(inode)->root = fs_info->tree_root;
	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
	btrfs_insert_inode_hash(inode);
}

static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
	fs_info->dev_replace.lock_owner = 0;
	atomic_set(&fs_info->dev_replace.nesting_level, 0);
	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
	rwlock_init(&fs_info->dev_replace.lock);
	atomic_set(&fs_info->dev_replace.read_locks, 0);
	atomic_set(&fs_info->dev_replace.blocking_readers, 0);
	init_waitqueue_head(&fs_info->replace_wait);
	init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
}

static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->qgroup_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	fs_info->qgroup_tree = RB_ROOT;
	fs_info->qgroup_op_tree = RB_ROOT;
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	fs_info->qgroup_seq = 1;
	fs_info->qgroup_ulist = NULL;
	fs_info->qgroup_rescan_running = false;
	mutex_init(&fs_info->qgroup_rescan_lock);
}

static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
				 struct btrfs_fs_devices *fs_devices)
{
	int max_active = fs_info->thread_pool_size;
	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;

	fs_info->workers =
		btrfs_alloc_workqueue(fs_info, "worker",
				      flags | WQ_HIGHPRI, max_active, 16);

	fs_info->delalloc_workers =
		btrfs_alloc_workqueue(fs_info, "delalloc",
				      flags, max_active, 2);

	fs_info->flush_workers =
		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
				      flags, max_active, 0);

	fs_info->caching_workers =
		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);

	/*
	 * a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be sent down in a sane order to the
	 * devices
	 */
	fs_info->submit_workers =
		btrfs_alloc_workqueue(fs_info, "submit", flags,
				      min_t(u64, fs_devices->num_devices,
					    max_active), 64);

	fs_info->fixup_workers =
		btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);

	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers =
		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
	fs_info->endio_meta_workers =
btrfs_alloc_workqueue(fs_info, "endio-meta", flags, 2233 max_active, 4); 2234 fs_info->endio_meta_write_workers = 2235 btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags, 2236 max_active, 2); 2237 fs_info->endio_raid56_workers = 2238 btrfs_alloc_workqueue(fs_info, "endio-raid56", flags, 2239 max_active, 4); 2240 fs_info->endio_repair_workers = 2241 btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0); 2242 fs_info->rmw_workers = 2243 btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2); 2244 fs_info->endio_write_workers = 2245 btrfs_alloc_workqueue(fs_info, "endio-write", flags, 2246 max_active, 2); 2247 fs_info->endio_freespace_worker = 2248 btrfs_alloc_workqueue(fs_info, "freespace-write", flags, 2249 max_active, 0); 2250 fs_info->delayed_workers = 2251 btrfs_alloc_workqueue(fs_info, "delayed-meta", flags, 2252 max_active, 0); 2253 fs_info->readahead_workers = 2254 btrfs_alloc_workqueue(fs_info, "readahead", flags, 2255 max_active, 2); 2256 fs_info->qgroup_rescan_workers = 2257 btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0); 2258 fs_info->extent_workers = 2259 btrfs_alloc_workqueue(fs_info, "extent-refs", flags, 2260 min_t(u64, fs_devices->num_devices, 2261 max_active), 8); 2262 2263 if (!(fs_info->workers && fs_info->delalloc_workers && 2264 fs_info->submit_workers && fs_info->flush_workers && 2265 fs_info->endio_workers && fs_info->endio_meta_workers && 2266 fs_info->endio_meta_write_workers && 2267 fs_info->endio_repair_workers && 2268 fs_info->endio_write_workers && fs_info->endio_raid56_workers && 2269 fs_info->endio_freespace_worker && fs_info->rmw_workers && 2270 fs_info->caching_workers && fs_info->readahead_workers && 2271 fs_info->fixup_workers && fs_info->delayed_workers && 2272 fs_info->extent_workers && 2273 fs_info->qgroup_rescan_workers)) { 2274 return -ENOMEM; 2275 } 2276 2277 return 0; 2278 } 2279 2280 static int btrfs_replay_log(struct btrfs_fs_info *fs_info, 2281 struct btrfs_fs_devices *fs_devices) 2282 { 2283 int ret; 2284 struct btrfs_root *log_tree_root; 2285 struct btrfs_super_block *disk_super = fs_info->super_copy; 2286 u64 bytenr = btrfs_super_log_root(disk_super); 2287 2288 if (fs_devices->rw_devices == 0) { 2289 btrfs_warn(fs_info, "log replay required on RO media"); 2290 return -EIO; 2291 } 2292 2293 log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); 2294 if (!log_tree_root) 2295 return -ENOMEM; 2296 2297 __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID); 2298 2299 log_tree_root->node = read_tree_block(fs_info, bytenr, 2300 fs_info->generation + 1); 2301 if (IS_ERR(log_tree_root->node)) { 2302 btrfs_warn(fs_info, "failed to read log tree"); 2303 ret = PTR_ERR(log_tree_root->node); 2304 kfree(log_tree_root); 2305 return ret; 2306 } else if (!extent_buffer_uptodate(log_tree_root->node)) { 2307 btrfs_err(fs_info, "failed to read log tree"); 2308 free_extent_buffer(log_tree_root->node); 2309 kfree(log_tree_root); 2310 return -EIO; 2311 } 2312 /* returns with log_tree_root freed on success */ 2313 ret = btrfs_recover_log_trees(log_tree_root); 2314 if (ret) { 2315 btrfs_handle_fs_error(fs_info, ret, 2316 "Failed to recover log tree"); 2317 free_extent_buffer(log_tree_root->node); 2318 kfree(log_tree_root); 2319 return ret; 2320 } 2321 2322 if (sb_rdonly(fs_info->sb)) { 2323 ret = btrfs_commit_super(fs_info); 2324 if (ret) 2325 return ret; 2326 } 2327 2328 return 0; 2329 } 2330 2331 static int btrfs_read_roots(struct btrfs_fs_info *fs_info) 2332 { 2333 struct btrfs_root *tree_root = fs_info->tree_root; 2334 struct 
btrfs_root *root; 2335 struct btrfs_key location; 2336 int ret; 2337 2338 BUG_ON(!fs_info->tree_root); 2339 2340 location.objectid = BTRFS_EXTENT_TREE_OBJECTID; 2341 location.type = BTRFS_ROOT_ITEM_KEY; 2342 location.offset = 0; 2343 2344 root = btrfs_read_tree_root(tree_root, &location); 2345 if (IS_ERR(root)) 2346 return PTR_ERR(root); 2347 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2348 fs_info->extent_root = root; 2349 2350 location.objectid = BTRFS_DEV_TREE_OBJECTID; 2351 root = btrfs_read_tree_root(tree_root, &location); 2352 if (IS_ERR(root)) 2353 return PTR_ERR(root); 2354 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2355 fs_info->dev_root = root; 2356 btrfs_init_devices_late(fs_info); 2357 2358 location.objectid = BTRFS_CSUM_TREE_OBJECTID; 2359 root = btrfs_read_tree_root(tree_root, &location); 2360 if (IS_ERR(root)) 2361 return PTR_ERR(root); 2362 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2363 fs_info->csum_root = root; 2364 2365 location.objectid = BTRFS_QUOTA_TREE_OBJECTID; 2366 root = btrfs_read_tree_root(tree_root, &location); 2367 if (!IS_ERR(root)) { 2368 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2369 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 2370 fs_info->quota_root = root; 2371 } 2372 2373 location.objectid = BTRFS_UUID_TREE_OBJECTID; 2374 root = btrfs_read_tree_root(tree_root, &location); 2375 if (IS_ERR(root)) { 2376 ret = PTR_ERR(root); 2377 if (ret != -ENOENT) 2378 return ret; 2379 } else { 2380 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2381 fs_info->uuid_root = root; 2382 } 2383 2384 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { 2385 location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID; 2386 root = btrfs_read_tree_root(tree_root, &location); 2387 if (IS_ERR(root)) 2388 return PTR_ERR(root); 2389 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2390 fs_info->free_space_root = root; 2391 } 2392 2393 return 0; 2394 } 2395 2396 int open_ctree(struct super_block *sb, 2397 struct btrfs_fs_devices *fs_devices, 2398 char *options) 2399 { 2400 u32 sectorsize; 2401 u32 nodesize; 2402 u32 stripesize; 2403 u64 generation; 2404 u64 features; 2405 struct btrfs_key location; 2406 struct buffer_head *bh; 2407 struct btrfs_super_block *disk_super; 2408 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 2409 struct btrfs_root *tree_root; 2410 struct btrfs_root *chunk_root; 2411 int ret; 2412 int err = -EINVAL; 2413 int num_backups_tried = 0; 2414 int backup_index = 0; 2415 int max_active; 2416 int clear_free_space_tree = 0; 2417 2418 tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); 2419 chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL); 2420 if (!tree_root || !chunk_root) { 2421 err = -ENOMEM; 2422 goto fail; 2423 } 2424 2425 ret = init_srcu_struct(&fs_info->subvol_srcu); 2426 if (ret) { 2427 err = ret; 2428 goto fail; 2429 } 2430 2431 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); 2432 if (ret) { 2433 err = ret; 2434 goto fail_srcu; 2435 } 2436 fs_info->dirty_metadata_batch = PAGE_SIZE * 2437 (1 + ilog2(nr_cpu_ids)); 2438 2439 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); 2440 if (ret) { 2441 err = ret; 2442 goto fail_dirty_metadata_bytes; 2443 } 2444 2445 ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL); 2446 if (ret) { 2447 err = ret; 2448 goto fail_delalloc_bytes; 2449 } 2450 2451 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); 2452 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); 2453 INIT_LIST_HEAD(&fs_info->trans_list); 2454 
INIT_LIST_HEAD(&fs_info->dead_roots); 2455 INIT_LIST_HEAD(&fs_info->delayed_iputs); 2456 INIT_LIST_HEAD(&fs_info->delalloc_roots); 2457 INIT_LIST_HEAD(&fs_info->caching_block_groups); 2458 spin_lock_init(&fs_info->delalloc_root_lock); 2459 spin_lock_init(&fs_info->trans_lock); 2460 spin_lock_init(&fs_info->fs_roots_radix_lock); 2461 spin_lock_init(&fs_info->delayed_iput_lock); 2462 spin_lock_init(&fs_info->defrag_inodes_lock); 2463 spin_lock_init(&fs_info->tree_mod_seq_lock); 2464 spin_lock_init(&fs_info->super_lock); 2465 spin_lock_init(&fs_info->qgroup_op_lock); 2466 spin_lock_init(&fs_info->buffer_lock); 2467 spin_lock_init(&fs_info->unused_bgs_lock); 2468 rwlock_init(&fs_info->tree_mod_log_lock); 2469 mutex_init(&fs_info->unused_bg_unpin_mutex); 2470 mutex_init(&fs_info->delete_unused_bgs_mutex); 2471 mutex_init(&fs_info->reloc_mutex); 2472 mutex_init(&fs_info->delalloc_root_mutex); 2473 mutex_init(&fs_info->cleaner_delayed_iput_mutex); 2474 seqlock_init(&fs_info->profiles_lock); 2475 2476 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); 2477 INIT_LIST_HEAD(&fs_info->space_info); 2478 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); 2479 INIT_LIST_HEAD(&fs_info->unused_bgs); 2480 btrfs_mapping_init(&fs_info->mapping_tree); 2481 btrfs_init_block_rsv(&fs_info->global_block_rsv, 2482 BTRFS_BLOCK_RSV_GLOBAL); 2483 btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); 2484 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); 2485 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); 2486 btrfs_init_block_rsv(&fs_info->delayed_block_rsv, 2487 BTRFS_BLOCK_RSV_DELOPS); 2488 atomic_set(&fs_info->async_delalloc_pages, 0); 2489 atomic_set(&fs_info->defrag_running, 0); 2490 atomic_set(&fs_info->qgroup_op_seq, 0); 2491 atomic_set(&fs_info->reada_works_cnt, 0); 2492 atomic64_set(&fs_info->tree_mod_seq, 0); 2493 fs_info->sb = sb; 2494 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; 2495 fs_info->metadata_ratio = 0; 2496 fs_info->defrag_inodes = RB_ROOT; 2497 atomic64_set(&fs_info->free_chunk_space, 0); 2498 fs_info->tree_mod_log = RB_ROOT; 2499 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; 2500 fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */ 2501 /* readahead state */ 2502 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 2503 spin_lock_init(&fs_info->reada_lock); 2504 btrfs_init_ref_verify(fs_info); 2505 2506 fs_info->thread_pool_size = min_t(unsigned long, 2507 num_online_cpus() + 2, 8); 2508 2509 INIT_LIST_HEAD(&fs_info->ordered_roots); 2510 spin_lock_init(&fs_info->ordered_root_lock); 2511 2512 fs_info->btree_inode = new_inode(sb); 2513 if (!fs_info->btree_inode) { 2514 err = -ENOMEM; 2515 goto fail_bio_counter; 2516 } 2517 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); 2518 2519 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), 2520 GFP_KERNEL); 2521 if (!fs_info->delayed_root) { 2522 err = -ENOMEM; 2523 goto fail_iput; 2524 } 2525 btrfs_init_delayed_root(fs_info->delayed_root); 2526 2527 btrfs_init_scrub(fs_info); 2528 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 2529 fs_info->check_integrity_print_mask = 0; 2530 #endif 2531 btrfs_init_balance(fs_info); 2532 btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work); 2533 2534 sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; 2535 sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); 2536 2537 btrfs_init_btree_inode(fs_info); 2538 2539 spin_lock_init(&fs_info->block_group_cache_lock); 2540 
fs_info->block_group_cache_tree = RB_ROOT; 2541 fs_info->first_logical_byte = (u64)-1; 2542 2543 extent_io_tree_init(&fs_info->freed_extents[0], NULL); 2544 extent_io_tree_init(&fs_info->freed_extents[1], NULL); 2545 fs_info->pinned_extents = &fs_info->freed_extents[0]; 2546 set_bit(BTRFS_FS_BARRIER, &fs_info->flags); 2547 2548 mutex_init(&fs_info->ordered_operations_mutex); 2549 mutex_init(&fs_info->tree_log_mutex); 2550 mutex_init(&fs_info->chunk_mutex); 2551 mutex_init(&fs_info->transaction_kthread_mutex); 2552 mutex_init(&fs_info->cleaner_mutex); 2553 mutex_init(&fs_info->volume_mutex); 2554 mutex_init(&fs_info->ro_block_group_mutex); 2555 init_rwsem(&fs_info->commit_root_sem); 2556 init_rwsem(&fs_info->cleanup_work_sem); 2557 init_rwsem(&fs_info->subvol_sem); 2558 sema_init(&fs_info->uuid_tree_rescan_sem, 1); 2559 2560 btrfs_init_dev_replace_locks(fs_info); 2561 btrfs_init_qgroup(fs_info); 2562 2563 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); 2564 btrfs_init_free_cluster(&fs_info->data_alloc_cluster); 2565 2566 init_waitqueue_head(&fs_info->transaction_throttle); 2567 init_waitqueue_head(&fs_info->transaction_wait); 2568 init_waitqueue_head(&fs_info->transaction_blocked_wait); 2569 init_waitqueue_head(&fs_info->async_submit_wait); 2570 2571 INIT_LIST_HEAD(&fs_info->pinned_chunks); 2572 2573 /* Usable values until the real ones are cached from the superblock */ 2574 fs_info->nodesize = 4096; 2575 fs_info->sectorsize = 4096; 2576 fs_info->stripesize = 4096; 2577 2578 ret = btrfs_alloc_stripe_hash_table(fs_info); 2579 if (ret) { 2580 err = ret; 2581 goto fail_alloc; 2582 } 2583 2584 __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); 2585 2586 invalidate_bdev(fs_devices->latest_bdev); 2587 2588 /* 2589 * Read super block and check the signature bytes only 2590 */ 2591 bh = btrfs_read_dev_super(fs_devices->latest_bdev); 2592 if (IS_ERR(bh)) { 2593 err = PTR_ERR(bh); 2594 goto fail_alloc; 2595 } 2596 2597 /* 2598 * We want to check superblock checksum, the type is stored inside. 2599 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). 2600 */ 2601 if (btrfs_check_super_csum(fs_info, bh->b_data)) { 2602 btrfs_err(fs_info, "superblock checksum mismatch"); 2603 err = -EINVAL; 2604 brelse(bh); 2605 goto fail_alloc; 2606 } 2607 2608 /* 2609 * super_copy is zeroed at allocation time and we never touch the 2610 * following bytes up to INFO_SIZE, the checksum is calculated from 2611 * the whole block of INFO_SIZE 2612 */ 2613 memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy)); 2614 memcpy(fs_info->super_for_commit, fs_info->super_copy, 2615 sizeof(*fs_info->super_for_commit)); 2616 brelse(bh); 2617 2618 memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE); 2619 2620 ret = btrfs_check_super_valid(fs_info); 2621 if (ret) { 2622 btrfs_err(fs_info, "superblock contains fatal errors"); 2623 err = -EINVAL; 2624 goto fail_alloc; 2625 } 2626 2627 disk_super = fs_info->super_copy; 2628 if (!btrfs_super_root(disk_super)) 2629 goto fail_alloc; 2630 2631 /* check FS state, whether FS is broken. 
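 * (i.e. whether a previous mount set the error flag in the super block)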
*/ 2632 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) 2633 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); 2634 2635 /* 2636 * run through our array of backup supers and setup 2637 * our ring pointer to the oldest one 2638 */ 2639 generation = btrfs_super_generation(disk_super); 2640 find_oldest_super_backup(fs_info, generation); 2641 2642 /* 2643 * In the long term, we'll store the compression type in the super 2644 * block, and it'll be used for per file compression control. 2645 */ 2646 fs_info->compress_type = BTRFS_COMPRESS_ZLIB; 2647 2648 ret = btrfs_parse_options(fs_info, options, sb->s_flags); 2649 if (ret) { 2650 err = ret; 2651 goto fail_alloc; 2652 } 2653 2654 features = btrfs_super_incompat_flags(disk_super) & 2655 ~BTRFS_FEATURE_INCOMPAT_SUPP; 2656 if (features) { 2657 btrfs_err(fs_info, 2658 "cannot mount because of unsupported optional features (%llx)", 2659 features); 2660 err = -EINVAL; 2661 goto fail_alloc; 2662 } 2663 2664 features = btrfs_super_incompat_flags(disk_super); 2665 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; 2666 if (fs_info->compress_type == BTRFS_COMPRESS_LZO) 2667 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; 2668 else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD) 2669 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD; 2670 2671 if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) 2672 btrfs_info(fs_info, "has skinny extents"); 2673 2674 /* 2675 * flag our filesystem as having big metadata blocks if 2676 * they are bigger than the page size 2677 */ 2678 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { 2679 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) 2680 btrfs_info(fs_info, 2681 "flagging fs with big metadata feature"); 2682 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; 2683 } 2684 2685 nodesize = btrfs_super_nodesize(disk_super); 2686 sectorsize = btrfs_super_sectorsize(disk_super); 2687 stripesize = sectorsize; 2688 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); 2689 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); 2690 2691 /* Cache block sizes */ 2692 fs_info->nodesize = nodesize; 2693 fs_info->sectorsize = sectorsize; 2694 fs_info->stripesize = stripesize; 2695 2696 /* 2697 * mixed block groups end up with duplicate but slightly offset 2698 * extent buffers for the same range. It leads to corruptions 2699 */ 2700 if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && 2701 (sectorsize != nodesize)) { 2702 btrfs_err(fs_info, 2703 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups", 2704 nodesize, sectorsize); 2705 goto fail_alloc; 2706 } 2707 2708 /* 2709 * Needn't use the lock because there is no other task which will 2710 * update the flag. 
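 * (mount is still effectively single threaded here; the worker threads
 * are only set up by btrfs_init_workqueues() below)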
2711 */ 2712 btrfs_set_super_incompat_flags(disk_super, features); 2713 2714 features = btrfs_super_compat_ro_flags(disk_super) & 2715 ~BTRFS_FEATURE_COMPAT_RO_SUPP; 2716 if (!sb_rdonly(sb) && features) { 2717 btrfs_err(fs_info, 2718 "cannot mount read-write because of unsupported optional features (%llx)", 2719 features); 2720 err = -EINVAL; 2721 goto fail_alloc; 2722 } 2723 2724 max_active = fs_info->thread_pool_size; 2725 2726 ret = btrfs_init_workqueues(fs_info, fs_devices); 2727 if (ret) { 2728 err = ret; 2729 goto fail_sb_buffer; 2730 } 2731 2732 sb->s_bdi->congested_fn = btrfs_congested_fn; 2733 sb->s_bdi->congested_data = fs_info; 2734 sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; 2735 sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE; 2736 sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); 2737 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); 2738 2739 sb->s_blocksize = sectorsize; 2740 sb->s_blocksize_bits = blksize_bits(sectorsize); 2741 memcpy(&sb->s_uuid, fs_info->fsid, BTRFS_FSID_SIZE); 2742 2743 mutex_lock(&fs_info->chunk_mutex); 2744 ret = btrfs_read_sys_array(fs_info); 2745 mutex_unlock(&fs_info->chunk_mutex); 2746 if (ret) { 2747 btrfs_err(fs_info, "failed to read the system array: %d", ret); 2748 goto fail_sb_buffer; 2749 } 2750 2751 generation = btrfs_super_chunk_root_generation(disk_super); 2752 2753 __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); 2754 2755 chunk_root->node = read_tree_block(fs_info, 2756 btrfs_super_chunk_root(disk_super), 2757 generation); 2758 if (IS_ERR(chunk_root->node) || 2759 !extent_buffer_uptodate(chunk_root->node)) { 2760 btrfs_err(fs_info, "failed to read chunk root"); 2761 if (!IS_ERR(chunk_root->node)) 2762 free_extent_buffer(chunk_root->node); 2763 chunk_root->node = NULL; 2764 goto fail_tree_roots; 2765 } 2766 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); 2767 chunk_root->commit_root = btrfs_root_node(chunk_root); 2768 2769 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, 2770 btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); 2771 2772 ret = btrfs_read_chunk_tree(fs_info); 2773 if (ret) { 2774 btrfs_err(fs_info, "failed to read chunk tree: %d", ret); 2775 goto fail_tree_roots; 2776 } 2777 2778 /* 2779 * keep the device that is marked to be the target device for the 2780 * dev_replace procedure 2781 */ 2782 btrfs_close_extra_devices(fs_devices, 0); 2783 2784 if (!fs_devices->latest_bdev) { 2785 btrfs_err(fs_info, "failed to read devices"); 2786 goto fail_tree_roots; 2787 } 2788 2789 retry_root_backup: 2790 generation = btrfs_super_generation(disk_super); 2791 2792 tree_root->node = read_tree_block(fs_info, 2793 btrfs_super_root(disk_super), 2794 generation); 2795 if (IS_ERR(tree_root->node) || 2796 !extent_buffer_uptodate(tree_root->node)) { 2797 btrfs_warn(fs_info, "failed to read tree root"); 2798 if (!IS_ERR(tree_root->node)) 2799 free_extent_buffer(tree_root->node); 2800 tree_root->node = NULL; 2801 goto recovery_tree_root; 2802 } 2803 2804 btrfs_set_root_node(&tree_root->root_item, tree_root->node); 2805 tree_root->commit_root = btrfs_root_node(tree_root); 2806 btrfs_set_root_refs(&tree_root->root_item, 1); 2807 2808 mutex_lock(&tree_root->objectid_mutex); 2809 ret = btrfs_find_highest_objectid(tree_root, 2810 &tree_root->highest_objectid); 2811 if (ret) { 2812 mutex_unlock(&tree_root->objectid_mutex); 2813 goto recovery_tree_root; 2814 } 2815 2816 ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); 2817 2818 
mutex_unlock(&tree_root->objectid_mutex); 2819 2820 ret = btrfs_read_roots(fs_info); 2821 if (ret) 2822 goto recovery_tree_root; 2823 2824 fs_info->generation = generation; 2825 fs_info->last_trans_committed = generation; 2826 2827 ret = btrfs_recover_balance(fs_info); 2828 if (ret) { 2829 btrfs_err(fs_info, "failed to recover balance: %d", ret); 2830 goto fail_block_groups; 2831 } 2832 2833 ret = btrfs_init_dev_stats(fs_info); 2834 if (ret) { 2835 btrfs_err(fs_info, "failed to init dev_stats: %d", ret); 2836 goto fail_block_groups; 2837 } 2838 2839 ret = btrfs_init_dev_replace(fs_info); 2840 if (ret) { 2841 btrfs_err(fs_info, "failed to init dev_replace: %d", ret); 2842 goto fail_block_groups; 2843 } 2844 2845 btrfs_close_extra_devices(fs_devices, 1); 2846 2847 ret = btrfs_sysfs_add_fsid(fs_devices, NULL); 2848 if (ret) { 2849 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d", 2850 ret); 2851 goto fail_block_groups; 2852 } 2853 2854 ret = btrfs_sysfs_add_device(fs_devices); 2855 if (ret) { 2856 btrfs_err(fs_info, "failed to init sysfs device interface: %d", 2857 ret); 2858 goto fail_fsdev_sysfs; 2859 } 2860 2861 ret = btrfs_sysfs_add_mounted(fs_info); 2862 if (ret) { 2863 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret); 2864 goto fail_fsdev_sysfs; 2865 } 2866 2867 ret = btrfs_init_space_info(fs_info); 2868 if (ret) { 2869 btrfs_err(fs_info, "failed to initialize space info: %d", ret); 2870 goto fail_sysfs; 2871 } 2872 2873 ret = btrfs_read_block_groups(fs_info); 2874 if (ret) { 2875 btrfs_err(fs_info, "failed to read block groups: %d", ret); 2876 goto fail_sysfs; 2877 } 2878 2879 if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info)) { 2880 btrfs_warn(fs_info, 2881 "writeable mount is not allowed due to too many missing devices"); 2882 goto fail_sysfs; 2883 } 2884 2885 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, 2886 "btrfs-cleaner"); 2887 if (IS_ERR(fs_info->cleaner_kthread)) 2888 goto fail_sysfs; 2889 2890 fs_info->transaction_kthread = kthread_run(transaction_kthread, 2891 tree_root, 2892 "btrfs-transaction"); 2893 if (IS_ERR(fs_info->transaction_kthread)) 2894 goto fail_cleaner; 2895 2896 if (!btrfs_test_opt(fs_info, NOSSD) && 2897 !fs_info->fs_devices->rotating) { 2898 btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations"); 2899 } 2900 2901 /* 2902 * Mount does not set all options immediately, we can do it now and do 2903 * not have to wait for transaction commit 2904 */ 2905 btrfs_apply_pending_changes(fs_info); 2906 2907 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 2908 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) { 2909 ret = btrfsic_mount(fs_info, fs_devices, 2910 btrfs_test_opt(fs_info, 2911 CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? 
2912 1 : 0, 2913 fs_info->check_integrity_print_mask); 2914 if (ret) 2915 btrfs_warn(fs_info, 2916 "failed to initialize integrity check module: %d", 2917 ret); 2918 } 2919 #endif 2920 ret = btrfs_read_qgroup_config(fs_info); 2921 if (ret) 2922 goto fail_trans_kthread; 2923 2924 if (btrfs_build_ref_tree(fs_info)) 2925 btrfs_err(fs_info, "couldn't build ref tree"); 2926 2927 /* do not make disk changes in broken FS or nologreplay is given */ 2928 if (btrfs_super_log_root(disk_super) != 0 && 2929 !btrfs_test_opt(fs_info, NOLOGREPLAY)) { 2930 ret = btrfs_replay_log(fs_info, fs_devices); 2931 if (ret) { 2932 err = ret; 2933 goto fail_qgroup; 2934 } 2935 } 2936 2937 ret = btrfs_find_orphan_roots(fs_info); 2938 if (ret) 2939 goto fail_qgroup; 2940 2941 if (!sb_rdonly(sb)) { 2942 ret = btrfs_cleanup_fs_roots(fs_info); 2943 if (ret) 2944 goto fail_qgroup; 2945 2946 mutex_lock(&fs_info->cleaner_mutex); 2947 ret = btrfs_recover_relocation(tree_root); 2948 mutex_unlock(&fs_info->cleaner_mutex); 2949 if (ret < 0) { 2950 btrfs_warn(fs_info, "failed to recover relocation: %d", 2951 ret); 2952 err = -EINVAL; 2953 goto fail_qgroup; 2954 } 2955 } 2956 2957 location.objectid = BTRFS_FS_TREE_OBJECTID; 2958 location.type = BTRFS_ROOT_ITEM_KEY; 2959 location.offset = 0; 2960 2961 fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); 2962 if (IS_ERR(fs_info->fs_root)) { 2963 err = PTR_ERR(fs_info->fs_root); 2964 goto fail_qgroup; 2965 } 2966 2967 if (sb_rdonly(sb)) 2968 return 0; 2969 2970 if (btrfs_test_opt(fs_info, CLEAR_CACHE) && 2971 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { 2972 clear_free_space_tree = 1; 2973 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && 2974 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) { 2975 btrfs_warn(fs_info, "free space tree is invalid"); 2976 clear_free_space_tree = 1; 2977 } 2978 2979 if (clear_free_space_tree) { 2980 btrfs_info(fs_info, "clearing free space tree"); 2981 ret = btrfs_clear_free_space_tree(fs_info); 2982 if (ret) { 2983 btrfs_warn(fs_info, 2984 "failed to clear free space tree: %d", ret); 2985 close_ctree(fs_info); 2986 return ret; 2987 } 2988 } 2989 2990 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) && 2991 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { 2992 btrfs_info(fs_info, "creating free space tree"); 2993 ret = btrfs_create_free_space_tree(fs_info); 2994 if (ret) { 2995 btrfs_warn(fs_info, 2996 "failed to create free space tree: %d", ret); 2997 close_ctree(fs_info); 2998 return ret; 2999 } 3000 } 3001 3002 down_read(&fs_info->cleanup_work_sem); 3003 if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || 3004 (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { 3005 up_read(&fs_info->cleanup_work_sem); 3006 close_ctree(fs_info); 3007 return ret; 3008 } 3009 up_read(&fs_info->cleanup_work_sem); 3010 3011 ret = btrfs_resume_balance_async(fs_info); 3012 if (ret) { 3013 btrfs_warn(fs_info, "failed to resume balance: %d", ret); 3014 close_ctree(fs_info); 3015 return ret; 3016 } 3017 3018 ret = btrfs_resume_dev_replace_async(fs_info); 3019 if (ret) { 3020 btrfs_warn(fs_info, "failed to resume device replace: %d", ret); 3021 close_ctree(fs_info); 3022 return ret; 3023 } 3024 3025 btrfs_qgroup_rescan_resume(fs_info); 3026 3027 if (!fs_info->uuid_root) { 3028 btrfs_info(fs_info, "creating UUID tree"); 3029 ret = btrfs_create_uuid_tree(fs_info); 3030 if (ret) { 3031 btrfs_warn(fs_info, 3032 "failed to create the UUID tree: %d", ret); 3033 close_ctree(fs_info); 3034 return ret; 3035 } 3036 } else if (btrfs_test_opt(fs_info, 
RESCAN_UUID_TREE) || 3037 fs_info->generation != 3038 btrfs_super_uuid_tree_generation(disk_super)) { 3039 btrfs_info(fs_info, "checking UUID tree"); 3040 ret = btrfs_check_uuid_tree(fs_info); 3041 if (ret) { 3042 btrfs_warn(fs_info, 3043 "failed to check the UUID tree: %d", ret); 3044 close_ctree(fs_info); 3045 return ret; 3046 } 3047 } else { 3048 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 3049 } 3050 set_bit(BTRFS_FS_OPEN, &fs_info->flags); 3051 3052 /* 3053 * backuproot only affect mount behavior, and if open_ctree succeeded, 3054 * no need to keep the flag 3055 */ 3056 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT); 3057 3058 return 0; 3059 3060 fail_qgroup: 3061 btrfs_free_qgroup_config(fs_info); 3062 fail_trans_kthread: 3063 kthread_stop(fs_info->transaction_kthread); 3064 btrfs_cleanup_transaction(fs_info); 3065 btrfs_free_fs_roots(fs_info); 3066 fail_cleaner: 3067 kthread_stop(fs_info->cleaner_kthread); 3068 3069 /* 3070 * make sure we're done with the btree inode before we stop our 3071 * kthreads 3072 */ 3073 filemap_write_and_wait(fs_info->btree_inode->i_mapping); 3074 3075 fail_sysfs: 3076 btrfs_sysfs_remove_mounted(fs_info); 3077 3078 fail_fsdev_sysfs: 3079 btrfs_sysfs_remove_fsid(fs_info->fs_devices); 3080 3081 fail_block_groups: 3082 btrfs_put_block_group_cache(fs_info); 3083 3084 fail_tree_roots: 3085 free_root_pointers(fs_info, 1); 3086 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 3087 3088 fail_sb_buffer: 3089 btrfs_stop_all_workers(fs_info); 3090 btrfs_free_block_groups(fs_info); 3091 fail_alloc: 3092 fail_iput: 3093 btrfs_mapping_tree_free(&fs_info->mapping_tree); 3094 3095 iput(fs_info->btree_inode); 3096 fail_bio_counter: 3097 percpu_counter_destroy(&fs_info->bio_counter); 3098 fail_delalloc_bytes: 3099 percpu_counter_destroy(&fs_info->delalloc_bytes); 3100 fail_dirty_metadata_bytes: 3101 percpu_counter_destroy(&fs_info->dirty_metadata_bytes); 3102 fail_srcu: 3103 cleanup_srcu_struct(&fs_info->subvol_srcu); 3104 fail: 3105 btrfs_free_stripe_hash_table(fs_info); 3106 btrfs_close_devices(fs_info->fs_devices); 3107 return err; 3108 3109 recovery_tree_root: 3110 if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) 3111 goto fail_tree_roots; 3112 3113 free_root_pointers(fs_info, 0); 3114 3115 /* don't use the log in recovery mode, it won't be valid */ 3116 btrfs_set_super_log_root(disk_super, 0); 3117 3118 /* we can't trust the free space cache either */ 3119 btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); 3120 3121 ret = next_root_backup(fs_info, fs_info->super_copy, 3122 &num_backups_tried, &backup_index); 3123 if (ret == -1) 3124 goto fail_block_groups; 3125 goto retry_root_backup; 3126 } 3127 BPF_ALLOW_ERROR_INJECTION(open_ctree); 3128 3129 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) 3130 { 3131 if (uptodate) { 3132 set_buffer_uptodate(bh); 3133 } else { 3134 struct btrfs_device *device = (struct btrfs_device *) 3135 bh->b_private; 3136 3137 btrfs_warn_rl_in_rcu(device->fs_info, 3138 "lost page write due to IO error on %s", 3139 rcu_str_deref(device->name)); 3140 /* note, we don't set_buffer_write_io_error because we have 3141 * our own ways of dealing with the IO errors 3142 */ 3143 clear_buffer_uptodate(bh); 3144 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS); 3145 } 3146 unlock_buffer(bh); 3147 put_bh(bh); 3148 } 3149 3150 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num, 3151 struct buffer_head **bh_ret) 3152 { 3153 struct buffer_head *bh; 3154 struct btrfs_super_block 
*super; 3155 u64 bytenr; 3156 3157 bytenr = btrfs_sb_offset(copy_num); 3158 if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode)) 3159 return -EINVAL; 3160 3161 bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE); 3162 /* 3163 * If we fail to read from the underlying devices, as of now 3164 * the best option we have is to mark it EIO. 3165 */ 3166 if (!bh) 3167 return -EIO; 3168 3169 super = (struct btrfs_super_block *)bh->b_data; 3170 if (btrfs_super_bytenr(super) != bytenr || 3171 btrfs_super_magic(super) != BTRFS_MAGIC) { 3172 brelse(bh); 3173 return -EINVAL; 3174 } 3175 3176 *bh_ret = bh; 3177 return 0; 3178 } 3179 3180 3181 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) 3182 { 3183 struct buffer_head *bh; 3184 struct buffer_head *latest = NULL; 3185 struct btrfs_super_block *super; 3186 int i; 3187 u64 transid = 0; 3188 int ret = -EINVAL; 3189 3190 /* we would like to check all the supers, but that would make 3191 * a btrfs mount succeed after a mkfs from a different FS. 3192 * So, we need to add a special mount option to scan for 3193 * later supers, using BTRFS_SUPER_MIRROR_MAX instead 3194 */ 3195 for (i = 0; i < 1; i++) { 3196 ret = btrfs_read_dev_one_super(bdev, i, &bh); 3197 if (ret) 3198 continue; 3199 3200 super = (struct btrfs_super_block *)bh->b_data; 3201 3202 if (!latest || btrfs_super_generation(super) > transid) { 3203 brelse(latest); 3204 latest = bh; 3205 transid = btrfs_super_generation(super); 3206 } else { 3207 brelse(bh); 3208 } 3209 } 3210 3211 if (!latest) 3212 return ERR_PTR(ret); 3213 3214 return latest; 3215 } 3216 3217 /* 3218 * Write superblock @sb to the @device. Do not wait for completion, all the 3219 * buffer heads we write are pinned. 3220 * 3221 * Write @max_mirrors copies of the superblock, where 0 means default that fit 3222 * the expected device size at commit time. Note that max_mirrors must be 3223 * same for write and wait phases. 3224 * 3225 * Return number of errors when buffer head is not found or submission fails. 3226 */ 3227 static int write_dev_supers(struct btrfs_device *device, 3228 struct btrfs_super_block *sb, int max_mirrors) 3229 { 3230 struct buffer_head *bh; 3231 int i; 3232 int ret; 3233 int errors = 0; 3234 u32 crc; 3235 u64 bytenr; 3236 int op_flags; 3237 3238 if (max_mirrors == 0) 3239 max_mirrors = BTRFS_SUPER_MIRROR_MAX; 3240 3241 for (i = 0; i < max_mirrors; i++) { 3242 bytenr = btrfs_sb_offset(i); 3243 if (bytenr + BTRFS_SUPER_INFO_SIZE >= 3244 device->commit_total_bytes) 3245 break; 3246 3247 btrfs_set_super_bytenr(sb, bytenr); 3248 3249 crc = ~(u32)0; 3250 crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc, 3251 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); 3252 btrfs_csum_final(crc, sb->csum); 3253 3254 /* One reference for us, and we leave it for the caller */ 3255 bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, 3256 BTRFS_SUPER_INFO_SIZE); 3257 if (!bh) { 3258 btrfs_err(device->fs_info, 3259 "couldn't get super buffer head for bytenr %llu", 3260 bytenr); 3261 errors++; 3262 continue; 3263 } 3264 3265 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE); 3266 3267 /* one reference for submit_bh */ 3268 get_bh(bh); 3269 3270 set_buffer_uptodate(bh); 3271 lock_buffer(bh); 3272 bh->b_end_io = btrfs_end_buffer_write_sync; 3273 bh->b_private = device; 3274 3275 /* 3276 * we fua the first super. The others we allow 3277 * to go down lazy. 
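 * (REQ_FUA makes the primary copy durable before the write completes;
 * losing a lazy secondary copy in a crash is tolerable because mount
 * reads only the first super anyway, see btrfs_read_dev_super)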
3278 */ 3279 op_flags = REQ_SYNC | REQ_META | REQ_PRIO; 3280 if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER)) 3281 op_flags |= REQ_FUA; 3282 ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh); 3283 if (ret) 3284 errors++; 3285 } 3286 return errors < i ? 0 : -1; 3287 } 3288 3289 /* 3290 * Wait for write completion of superblocks done by write_dev_supers, 3291 * @max_mirrors same for write and wait phases. 3292 * 3293 * Return number of errors when buffer head is not found or not marked up to 3294 * date. 3295 */ 3296 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors) 3297 { 3298 struct buffer_head *bh; 3299 int i; 3300 int errors = 0; 3301 u64 bytenr; 3302 3303 if (max_mirrors == 0) 3304 max_mirrors = BTRFS_SUPER_MIRROR_MAX; 3305 3306 for (i = 0; i < max_mirrors; i++) { 3307 bytenr = btrfs_sb_offset(i); 3308 if (bytenr + BTRFS_SUPER_INFO_SIZE >= 3309 device->commit_total_bytes) 3310 break; 3311 3312 bh = __find_get_block(device->bdev, 3313 bytenr / BTRFS_BDEV_BLOCKSIZE, 3314 BTRFS_SUPER_INFO_SIZE); 3315 if (!bh) { 3316 errors++; 3317 continue; 3318 } 3319 wait_on_buffer(bh); 3320 if (!buffer_uptodate(bh)) 3321 errors++; 3322 3323 /* drop our reference */ 3324 brelse(bh); 3325 3326 /* drop the reference from the writing run */ 3327 brelse(bh); 3328 } 3329 3330 return errors < i ? 0 : -1; 3331 } 3332 3333 /* 3334 * endio for the write_dev_flush, this will wake anyone waiting 3335 * for the barrier when it is done 3336 */ 3337 static void btrfs_end_empty_barrier(struct bio *bio) 3338 { 3339 complete(bio->bi_private); 3340 } 3341 3342 /* 3343 * Submit a flush request to the device if it supports it. Error handling is 3344 * done in the waiting counterpart. 3345 */ 3346 static void write_dev_flush(struct btrfs_device *device) 3347 { 3348 struct request_queue *q = bdev_get_queue(device->bdev); 3349 struct bio *bio = device->flush_bio; 3350 3351 if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) 3352 return; 3353 3354 bio_reset(bio); 3355 bio->bi_end_io = btrfs_end_empty_barrier; 3356 bio_set_dev(bio, device->bdev); 3357 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; 3358 init_completion(&device->flush_wait); 3359 bio->bi_private = &device->flush_wait; 3360 3361 btrfsic_submit_bio(bio); 3362 device->flush_bio_sent = 1; 3363 } 3364 3365 /* 3366 * If the flush bio has been submitted by write_dev_flush, wait for it. 
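 * Return the bio status so the caller can record a per-device flush
 * error.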
3367 */ 3368 static blk_status_t wait_dev_flush(struct btrfs_device *device) 3369 { 3370 struct bio *bio = device->flush_bio; 3371 3372 if (!device->flush_bio_sent) 3373 return BLK_STS_OK; 3374 3375 device->flush_bio_sent = 0; 3376 wait_for_completion_io(&device->flush_wait); 3377 3378 return bio->bi_status; 3379 } 3380 3381 static int check_barrier_error(struct btrfs_fs_info *fs_info) 3382 { 3383 if (!btrfs_check_rw_degradable(fs_info)) 3384 return -EIO; 3385 return 0; 3386 } 3387 3388 /* 3389 * send an empty flush down to each device in parallel, 3390 * then wait for them 3391 */ 3392 static int barrier_all_devices(struct btrfs_fs_info *info) 3393 { 3394 struct list_head *head; 3395 struct btrfs_device *dev; 3396 int errors_wait = 0; 3397 blk_status_t ret; 3398 3399 /* send down all the barriers */ 3400 head = &info->fs_devices->devices; 3401 list_for_each_entry_rcu(dev, head, dev_list) { 3402 if (dev->missing) 3403 continue; 3404 if (!dev->bdev) 3405 continue; 3406 if (!dev->in_fs_metadata || !dev->writeable) 3407 continue; 3408 3409 write_dev_flush(dev); 3410 dev->last_flush_error = BLK_STS_OK; 3411 } 3412 3413 /* wait for all the barriers */ 3414 list_for_each_entry_rcu(dev, head, dev_list) { 3415 if (dev->missing) 3416 continue; 3417 if (!dev->bdev) { 3418 errors_wait++; 3419 continue; 3420 } 3421 if (!dev->in_fs_metadata || !dev->writeable) 3422 continue; 3423 3424 ret = wait_dev_flush(dev); 3425 if (ret) { 3426 dev->last_flush_error = ret; 3427 btrfs_dev_stat_inc_and_print(dev, 3428 BTRFS_DEV_STAT_FLUSH_ERRS); 3429 errors_wait++; 3430 } 3431 } 3432 3433 if (errors_wait) { 3434 /* 3435 * At some point we need the status of all disks 3436 * to arrive at the volume status. So error checking 3437 * is being pushed to a separate loop. 3438 */ 3439 return check_barrier_error(info); 3440 } 3441 return 0; 3442 } 3443 3444 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags) 3445 { 3446 int raid_type; 3447 int min_tolerated = INT_MAX; 3448 3449 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 || 3450 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE)) 3451 min_tolerated = min(min_tolerated, 3452 btrfs_raid_array[BTRFS_RAID_SINGLE]. 3453 tolerated_failures); 3454 3455 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { 3456 if (raid_type == BTRFS_RAID_SINGLE) 3457 continue; 3458 if (!(flags & btrfs_raid_group[raid_type])) 3459 continue; 3460 min_tolerated = min(min_tolerated, 3461 btrfs_raid_array[raid_type]. 3462 tolerated_failures); 3463 } 3464 3465 if (min_tolerated == INT_MAX) { 3466 pr_warn("BTRFS: unknown raid flag: %llu", flags); 3467 min_tolerated = 0; 3468 } 3469 3470 return min_tolerated; 3471 } 3472 3473 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) 3474 { 3475 struct list_head *head; 3476 struct btrfs_device *dev; 3477 struct btrfs_super_block *sb; 3478 struct btrfs_dev_item *dev_item; 3479 int ret; 3480 int do_barriers; 3481 int max_errors; 3482 int total_errors = 0; 3483 u64 flags; 3484 3485 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER); 3486 3487 /* 3488 * max_mirrors == 0 indicates we're from commit_transaction, 3489 * not from fsync where the tree roots in fs_info have not 3490 * been consistent on disk. 
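 * Only in the commit case do we refresh the backup root ring below.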
3491 */ 3492 if (max_mirrors == 0) 3493 backup_super_roots(fs_info); 3494 3495 sb = fs_info->super_for_commit; 3496 dev_item = &sb->dev_item; 3497 3498 mutex_lock(&fs_info->fs_devices->device_list_mutex); 3499 head = &fs_info->fs_devices->devices; 3500 max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1; 3501 3502 if (do_barriers) { 3503 ret = barrier_all_devices(fs_info); 3504 if (ret) { 3505 mutex_unlock( 3506 &fs_info->fs_devices->device_list_mutex); 3507 btrfs_handle_fs_error(fs_info, ret, 3508 "errors while submitting device barriers."); 3509 return ret; 3510 } 3511 } 3512 3513 list_for_each_entry_rcu(dev, head, dev_list) { 3514 if (!dev->bdev) { 3515 total_errors++; 3516 continue; 3517 } 3518 if (!dev->in_fs_metadata || !dev->writeable) 3519 continue; 3520 3521 btrfs_set_stack_device_generation(dev_item, 0); 3522 btrfs_set_stack_device_type(dev_item, dev->type); 3523 btrfs_set_stack_device_id(dev_item, dev->devid); 3524 btrfs_set_stack_device_total_bytes(dev_item, 3525 dev->commit_total_bytes); 3526 btrfs_set_stack_device_bytes_used(dev_item, 3527 dev->commit_bytes_used); 3528 btrfs_set_stack_device_io_align(dev_item, dev->io_align); 3529 btrfs_set_stack_device_io_width(dev_item, dev->io_width); 3530 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size); 3531 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE); 3532 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_FSID_SIZE); 3533 3534 flags = btrfs_super_flags(sb); 3535 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); 3536 3537 ret = write_dev_supers(dev, sb, max_mirrors); 3538 if (ret) 3539 total_errors++; 3540 } 3541 if (total_errors > max_errors) { 3542 btrfs_err(fs_info, "%d errors while writing supers", 3543 total_errors); 3544 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 3545 3546 /* FUA is masked off if unsupported and can't be the reason */ 3547 btrfs_handle_fs_error(fs_info, -EIO, 3548 "%d errors while writing supers", 3549 total_errors); 3550 return -EIO; 3551 } 3552 3553 total_errors = 0; 3554 list_for_each_entry_rcu(dev, head, dev_list) { 3555 if (!dev->bdev) 3556 continue; 3557 if (!dev->in_fs_metadata || !dev->writeable) 3558 continue; 3559 3560 ret = wait_dev_supers(dev, max_mirrors); 3561 if (ret) 3562 total_errors++; 3563 } 3564 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 3565 if (total_errors > max_errors) { 3566 btrfs_handle_fs_error(fs_info, -EIO, 3567 "%d errors while writing supers", 3568 total_errors); 3569 return -EIO; 3570 } 3571 return 0; 3572 } 3573 3574 /* Drop a fs root from the radix tree and free it. 
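 * If the root item holds no more references, subvol_srcu is
 * synchronized first so that lock-free readers of fs_roots_radix are
 * done with the root before it is freed.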
*/ 3575 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, 3576 struct btrfs_root *root) 3577 { 3578 spin_lock(&fs_info->fs_roots_radix_lock); 3579 radix_tree_delete(&fs_info->fs_roots_radix, 3580 (unsigned long)root->root_key.objectid); 3581 spin_unlock(&fs_info->fs_roots_radix_lock); 3582 3583 if (btrfs_root_refs(&root->root_item) == 0) 3584 synchronize_srcu(&fs_info->subvol_srcu); 3585 3586 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { 3587 btrfs_free_log(NULL, root); 3588 if (root->reloc_root) { 3589 free_extent_buffer(root->reloc_root->node); 3590 free_extent_buffer(root->reloc_root->commit_root); 3591 btrfs_put_fs_root(root->reloc_root); 3592 root->reloc_root = NULL; 3593 } 3594 } 3595 3596 if (root->free_ino_pinned) 3597 __btrfs_remove_free_space_cache(root->free_ino_pinned); 3598 if (root->free_ino_ctl) 3599 __btrfs_remove_free_space_cache(root->free_ino_ctl); 3600 free_fs_root(root); 3601 } 3602 3603 static void free_fs_root(struct btrfs_root *root) 3604 { 3605 iput(root->ino_cache_inode); 3606 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); 3607 btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv); 3608 root->orphan_block_rsv = NULL; 3609 if (root->anon_dev) 3610 free_anon_bdev(root->anon_dev); 3611 if (root->subv_writers) 3612 btrfs_free_subvolume_writers(root->subv_writers); 3613 free_extent_buffer(root->node); 3614 free_extent_buffer(root->commit_root); 3615 kfree(root->free_ino_ctl); 3616 kfree(root->free_ino_pinned); 3617 kfree(root->name); 3618 btrfs_put_fs_root(root); 3619 } 3620 3621 void btrfs_free_fs_root(struct btrfs_root *root) 3622 { 3623 free_fs_root(root); 3624 } 3625 3626 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) 3627 { 3628 u64 root_objectid = 0; 3629 struct btrfs_root *gang[8]; 3630 int i = 0; 3631 int err = 0; 3632 unsigned int ret = 0; 3633 int index; 3634 3635 while (1) { 3636 index = srcu_read_lock(&fs_info->subvol_srcu); 3637 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, 3638 (void **)gang, root_objectid, 3639 ARRAY_SIZE(gang)); 3640 if (!ret) { 3641 srcu_read_unlock(&fs_info->subvol_srcu, index); 3642 break; 3643 } 3644 root_objectid = gang[ret - 1]->root_key.objectid + 1; 3645 3646 for (i = 0; i < ret; i++) { 3647 /* Avoid to grab roots in dead_roots */ 3648 if (btrfs_root_refs(&gang[i]->root_item) == 0) { 3649 gang[i] = NULL; 3650 continue; 3651 } 3652 /* grab all the search result for later use */ 3653 gang[i] = btrfs_grab_fs_root(gang[i]); 3654 } 3655 srcu_read_unlock(&fs_info->subvol_srcu, index); 3656 3657 for (i = 0; i < ret; i++) { 3658 if (!gang[i]) 3659 continue; 3660 root_objectid = gang[i]->root_key.objectid; 3661 err = btrfs_orphan_cleanup(gang[i]); 3662 if (err) 3663 break; 3664 btrfs_put_fs_root(gang[i]); 3665 } 3666 root_objectid++; 3667 } 3668 3669 /* release the uncleaned roots due to error */ 3670 for (; i < ret; i++) { 3671 if (gang[i]) 3672 btrfs_put_fs_root(gang[i]); 3673 } 3674 return err; 3675 } 3676 3677 int btrfs_commit_super(struct btrfs_fs_info *fs_info) 3678 { 3679 struct btrfs_root *root = fs_info->tree_root; 3680 struct btrfs_trans_handle *trans; 3681 3682 mutex_lock(&fs_info->cleaner_mutex); 3683 btrfs_run_delayed_iputs(fs_info); 3684 mutex_unlock(&fs_info->cleaner_mutex); 3685 wake_up_process(fs_info->cleaner_kthread); 3686 3687 /* wait until ongoing cleanup work done */ 3688 down_write(&fs_info->cleanup_work_sem); 3689 up_write(&fs_info->cleanup_work_sem); 3690 3691 trans = btrfs_join_transaction(root); 3692 if (IS_ERR(trans)) 3693 return PTR_ERR(trans); 3694 return 
btrfs_commit_transaction(trans);
}

void close_ctree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	int ret;

	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);

	/* wait for the qgroup rescan worker to stop */
	btrfs_qgroup_wait_for_completion(fs_info, false);

	/* wait for the uuid_scan task to finish */
	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complaints from lockdep et al., set sem back to initial state */
	up(&fs_info->uuid_tree_rescan_sem);

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	cancel_work_sync(&fs_info->async_reclaim_work);

	if (!sb_rdonly(fs_info->sb)) {
		/*
		 * If the cleaner thread is stopped and there are
		 * block groups queued for removal, the deletion will be
		 * skipped when we quit the cleaner thread.
		 */
		btrfs_delete_unused_bgs(fs_info);

		ret = btrfs_commit_super(fs_info);
		if (ret)
			btrfs_err(fs_info, "commit super ret %d", ret);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		btrfs_error_commit_super(fs_info);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);

	btrfs_free_qgroup_config(fs_info);

	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
		btrfs_info(fs_info, "at unmount delalloc count %lld",
			   percpu_counter_sum(&fs_info->delalloc_bytes));
	}

	btrfs_sysfs_remove_mounted(fs_info);
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

	btrfs_free_fs_roots(fs_info);

	btrfs_put_block_group_cache(fs_info);

	/*
	 * we must make sure no read request can be submitted after we have
	 * stopped all workers.
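	 * That is why the btree inode mapping is invalidated before the
	 * workers are stopped below.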
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
	struct inode *btree_inode = buf->pages[0]->mapping->host;

	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/*
	 * This is a fast path so only do this check if we have sanity tests
	 * enabled.  Normal people shouldn't be marking dummy buffers as dirty
	 * outside of the sanity tests.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
		return;
#endif
	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	fs_info = root->fs_info;
	btrfs_assert_tree_locked(buf);
	if (transid != fs_info->generation)
		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
		     buf->start, transid, fs_info->generation);
	was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty)
		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
					 buf->len,
					 fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	/*
	 * btrfs_mark_buffer_dirty() can be called with the item pointer set
	 * but the item data not yet updated, so only check item pointers
	 * here, not item data.
	 */
	if (btrfs_header_level(buf) == 0 &&
	    btrfs_check_leaf_relaxed(root, buf)) {
		btrfs_print_leaf(buf);
		ASSERT(0);
	}
#endif
}
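/*
 * Metadata writers are expected to call one of the two wrappers below after
 * dirtying btree blocks, so that dirty metadata is throttled against
 * BTRFS_DIRTY_METADATA_THRESH instead of growing without bound.  The
 * _nodelay variant skips flushing delayed items, presumably for callers
 * that cannot afford that extra work.  A typical sequence (illustrative,
 * not taken from this file) would be:
 *
 *	btrfs_mark_buffer_dirty(leaf);
 *	btrfs_release_path(path);
 *	btrfs_btree_balance_dirty(fs_info);
 */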
static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
					int flush_delayed)
{
	/*
	 * Older kernels can get into trouble with this code: they end up
	 * stuck in balance_dirty_pages() forever.
	 */
	int ret;

	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(fs_info);

	ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
				     BTRFS_DIRTY_METADATA_THRESH);
	if (ret > 0) {
		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
	}
}

void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 0);
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;

	return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
}
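/*
 * Sanity checks of the superblock read at mount time.  Note the structure:
 * hard failures set ret to -EINVAL but deliberately fall through so that
 * every problem found is logged in a single pass, while merely suspicious
 * values (unknown flags, a huge device count, generation skew) only warn
 * and do not fail the mount.
 */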
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *sb = fs_info->super_copy;
	u64 nodesize = btrfs_super_nodesize(sb);
	u64 sectorsize = btrfs_super_sectorsize(sb);
	int ret = 0;

	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
		btrfs_err(fs_info, "no valid FS found");
		ret = -EINVAL;
	}
	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
		btrfs_warn(fs_info, "unrecognized super flag: %llu",
			   btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
			  btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
			  btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "log_root level too big: %d >= %d",
			  btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}

	/*
	 * Check sectorsize and nodesize first, other checks will need them.
	 * Check all possible sector sizes (4K, 8K, 16K, 32K, 64K) here.
	 */
	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
		ret = -EINVAL;
	}
	/* Only PAGE_SIZE is supported for now */
	if (sectorsize != PAGE_SIZE) {
		btrfs_err(fs_info,
			  "sectorsize %llu not supported yet, only support %lu",
			  sectorsize, PAGE_SIZE);
		ret = -EINVAL;
	}
	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
		ret = -EINVAL;
	}
	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
			  le32_to_cpu(sb->__unused_leafsize), nodesize);
		ret = -EINVAL;
	}

	/* Root alignment check */
	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
			   btrfs_super_root(sb));
		ret = -EINVAL;
	}
	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
			   btrfs_super_chunk_root(sb));
		ret = -EINVAL;
	}
	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "log_root block unaligned: %llu",
			   btrfs_super_log_root(sb));
		ret = -EINVAL;
	}

	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) {
		btrfs_err(fs_info,
			  "dev_item UUID does not match fsid: %pU != %pU",
			  fs_info->fsid, sb->dev_item.fsid);
		ret = -EINVAL;
	}

	/*
	 * Hint to catch really bogus numbers, bit flips or similar; more
	 * exact checks are done later.
	 */
	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
		btrfs_err(fs_info, "bytes_used is too small %llu",
			  btrfs_super_bytes_used(sb));
		ret = -EINVAL;
	}
	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
		btrfs_err(fs_info, "invalid stripesize %u",
			  btrfs_super_stripesize(sb));
		ret = -EINVAL;
	}
	if (btrfs_super_num_devices(sb) > (1UL << 31))
		btrfs_warn(fs_info, "suspicious number of devices: %llu",
			   btrfs_super_num_devices(sb));
	if (btrfs_super_num_devices(sb) == 0) {
		btrfs_err(fs_info, "number of devices is 0");
		ret = -EINVAL;
	}

	if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
		btrfs_err(fs_info, "super offset mismatch %llu != %u",
			  btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
		ret = -EINVAL;
	}

	/*
	 * Obvious sys_chunk_array corruptions: it must hold at least one key
	 * and one chunk.
	 */
	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		btrfs_err(fs_info, "system chunk array too big %u > %u",
			  btrfs_super_sys_array_size(sb),
			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
		ret = -EINVAL;
	}
	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
			+ sizeof(struct btrfs_chunk)) {
		btrfs_err(fs_info, "system chunk array too small %u < %zu",
			  btrfs_super_sys_array_size(sb),
			  sizeof(struct btrfs_disk_key)
			  + sizeof(struct btrfs_chunk));
		ret = -EINVAL;
	}

	/*
	 * The generation is a global counter; we'll trust it more than the
	 * others, but it's still possible that it's the one that's wrong.
	 */
	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
		btrfs_warn(fs_info,
			"suspicious: generation < chunk_root_generation: %llu < %llu",
			btrfs_super_generation(sb),
			btrfs_super_chunk_root_generation(sb));
	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
	    && btrfs_super_cache_generation(sb) != (u64)-1)
		btrfs_warn(fs_info,
			"suspicious: generation < cache_generation: %llu < %llu",
			btrfs_super_generation(sb),
			btrfs_super_cache_generation(sb));

	return ret;
}
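/*
 * Everything from here down is the error-path teardown used once the
 * filesystem has hit BTRFS_FS_STATE_ERROR (e.g. after a transaction
 * abort): instead of committing, the in-memory transaction state is
 * walked and released piece by piece.
 */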
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);

	down_write(&fs_info->cleanup_work_sem);
	up_write(&fs_info->cleanup_work_sem);

	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(fs_info);
}

static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&root->ordered_extent_lock);
	/*
	 * This will just short-circuit the ordered completion stuff which will
	 * make sure the ordered extent gets properly cleaned up.
	 */
	list_for_each_entry(ordered, &root->ordered_extents,
			    root_extent_list)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
	spin_unlock(&root->ordered_extent_lock);
}

static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);

		spin_unlock(&fs_info->ordered_root_lock);
		btrfs_destroy_ordered_extents(root);

		cond_resched();
		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);
}
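/*
 * Drop every delayed ref of the aborted transaction without running it:
 * each head's refs are unhooked from head->ref_tree and released, and
 * heads with must_insert_reserved set still pin their byte range so space
 * accounting stays consistent until btrfs_destroy_pinned_extent() runs.
 */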
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (atomic_read(&delayed_refs->num_entries) == 0) {
		spin_unlock(&delayed_refs->lock);
		btrfs_info(fs_info, "delayed_refs has NO entry");
		return ret;
	}

	while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;
		struct rb_node *n;
		bool pin_bytes = false;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			spin_lock(&delayed_refs->lock);
			continue;
		}
		spin_lock(&head->lock);
		while ((n = rb_first(&head->ref_tree)) != NULL) {
			ref = rb_entry(n, struct btrfs_delayed_ref_node,
				       ref_node);
			ref->in_tree = 0;
			rb_erase(&ref->ref_node, &head->ref_tree);
			RB_CLEAR_NODE(&ref->ref_node);
			if (!list_empty(&ref->add_list))
				list_del(&ref->add_list);
			atomic_dec(&delayed_refs->num_entries);
			btrfs_put_delayed_ref(ref);
		}
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		delayed_refs->num_heads--;
		if (head->processing == 0)
			delayed_refs->num_heads_ready--;
		atomic_dec(&delayed_refs->num_entries);
		rb_erase(&head->href_node, &delayed_refs->href_root);
		RB_CLEAR_NODE(&head->href_node);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);

		if (pin_bytes)
			btrfs_pin_extent(fs_info, head->bytenr,
					 head->num_bytes, 1);
		btrfs_put_delayed_ref_head(head);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}

	spin_unlock(&delayed_refs->lock);

	return ret;
}

static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);

		list_del_init(&btrfs_inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &btrfs_inode->runtime_flags);
		spin_unlock(&root->delalloc_lock);

		btrfs_invalidate_inodes(btrfs_inode->root);

		spin_lock(&root->delalloc_lock);
	}

	spin_unlock(&root->delalloc_lock);
}

static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		list_del_init(&root->delalloc_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}
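/*
 * The two helpers below walk the extent_io_trees left over from the
 * aborted transaction: btrfs_destroy_marked_extents() clears the given
 * mark (e.g. EXTENT_DIRTY) from dirty btree pages and drops the matching
 * extent buffers, while btrfs_destroy_pinned_extent() discards both
 * freed_extents[] trees, one of which fs_info->pinned_extents points at.
 */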
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark, NULL);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark);
		while (start <= end) {
			eb = find_extent_buffer(fs_info, start);
			start += fs_info->nodesize;
			if (!eb)
				continue;
			wait_on_extent_buffer_writeback(eb);

			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
					       &eb->bflags))
				clear_extent_buffer_dirty(eb);
			free_extent_buffer_stale(eb);
		}
	}

	return ret;
}

static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents)
{
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;
	bool loop = true;

	unpin = pinned_extents;
again:
	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			break;

		clear_extent_dirty(unpin, start, end);
		btrfs_error_unpin_extent_range(fs_info, start, end);
		cond_resched();
	}

	if (loop) {
		if (unpin == &fs_info->freed_extents[0])
			unpin = &fs_info->freed_extents[1];
		else
			unpin = &fs_info->freed_extents[0];
		loop = false;
		goto again;
	}

	return 0;
}

static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
{
	struct inode *inode;

	inode = cache->io_ctl.inode;
	if (inode) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		cache->io_ctl.inode = NULL;
		iput(inode);
	}
	btrfs_put_block_group(cache);
}

void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *cache;

	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group_cache,
					 dirty_list);
		if (!cache) {
			btrfs_err(fs_info, "orphan block group dirty_bgs list");
			spin_unlock(&cur_trans->dirty_bgs_lock);
			return;
		}

		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_cleanup_bg_io(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		list_del_init(&cache->dirty_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);

		spin_unlock(&cur_trans->dirty_bgs_lock);
		btrfs_put_block_group(cache);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	while (!list_empty(&cur_trans->io_bgs)) {
		cache = list_first_entry(&cur_trans->io_bgs,
					 struct btrfs_block_group_cache,
					 io_list);
		if (!cache) {
			btrfs_err(fs_info, "orphan block group on io_bgs list");
			return;
		}

		list_del_init(&cache->io_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);
		btrfs_cleanup_bg_io(cache);
	}
}
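/*
 * Tear down a single aborted transaction.  The state transitions below
 * mirror the ones a real commit performs (COMMIT_START -> UNBLOCKED ->
 * COMPLETED) so that any task sleeping on the corresponding wait queues
 * is woken and can observe the abort.
 */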
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_fs_info *fs_info)
{
	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));

	btrfs_destroy_delayed_refs(cur_trans, fs_info);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&fs_info->transaction_wait);

	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);

	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(fs_info,
				    fs_info->pinned_extents);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
}

static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *t;

	mutex_lock(&fs_info->transaction_kthread_mutex);

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&fs_info->trans_list)) {
		t = list_first_entry(&fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_START) {
			refcount_inc(&t->use_count);
			spin_unlock(&fs_info->trans_lock);
			btrfs_wait_for_commit(fs_info, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&fs_info->trans_lock);
			continue;
		}
		if (t == fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't hold a trans
			 * handle open currently for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, fs_info);

		spin_lock(&fs_info->trans_lock);
		if (t == fs_info->running_transaction)
			fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(fs_info->tree_root);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(fs_info);
	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);
	btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
	btrfs_destroy_all_delalloc_inodes(fs_info);
	mutex_unlock(&fs_info->transaction_kthread_mutex);

	return 0;
}

static struct btrfs_fs_info *btree_fs_info(void *private_data)
{
	struct inode *inode = private_data;

	return btrfs_sb(inode->i_sb);
}

static const struct extent_io_ops btree_extent_io_ops = {
	/* mandatory callbacks */
	.submit_bio_hook = btree_submit_bio_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_io_failed_hook = btree_io_failed_hook,
	.set_range_writeback = btrfs_set_range_writeback,
	.tree_fs_info = btree_fs_info,

	/* optional callbacks */
};