// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"
#include "block-group.h"
#include "discard.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
{
	if (fs_info->csum_shash)
		crypto_free_shash(fs_info->csum_shash);
}
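
/*
 * Rough life cycle of a btrfs_end_io_wq, as wired up later in this file
 * (a summary of the code, not additional behavior):
 *
 *   btrfs_bio_wq_end_io()  - saves the bio's original bi_end_io/bi_private
 *                            and redirects bi_end_io to end_workqueue_bio()
 *   end_workqueue_bio()    - picks the matching endio workqueue and queues
 *                            the work item
 *   end_workqueue_fn()     - restores the saved fields, completes the bio
 *                            and frees the btrfs_end_io_wq
 */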

/*
 * Async submit bios are used to offload expensive checksumming onto the
 * worker threads.  They checksum file and metadata bios just before they
 * are sent down the IO stack.
 */
struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	extent_submit_bio_start_t *submit_bio_start;
	int mirror_num;

	/* Optional parameter for submit_bio_start used by direct io */
	u64 dio_file_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock.  For a given eb, the lockdep
 * key is determined by the btrfs_root it belongs to and the level the eb
 * occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other, so they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warnings in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

#define DEFINE_LEVEL(stem, level)					\
	.names[level] = "btrfs-" stem "-0" #level,

#define DEFINE_NAME(stem)						\
	DEFINE_LEVEL(stem, 0)						\
	DEFINE_LEVEL(stem, 1)						\
	DEFINE_LEVEL(stem, 2)						\
	DEFINE_LEVEL(stem, 3)						\
	DEFINE_LEVEL(stem, 4)						\
	DEFINE_LEVEL(stem, 5)						\
	DEFINE_LEVEL(stem, 6)						\
	DEFINE_LEVEL(stem, 7)

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	/* Longest entry: btrfs-free-space-00 */
	char			names[BTRFS_MAX_LEVEL][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	DEFINE_NAME("root")	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	DEFINE_NAME("extent")	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	DEFINE_NAME("chunk")	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	DEFINE_NAME("dev")	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	DEFINE_NAME("csum")	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	DEFINE_NAME("quota")	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	DEFINE_NAME("log")	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	DEFINE_NAME("treloc")	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	DEFINE_NAME("dreloc")	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	DEFINE_NAME("uuid")	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	DEFINE_NAME("free-space") },
	{ .id = 0,				DEFINE_NAME("tree")	},
};

#undef DEFINE_LEVEL
#undef DEFINE_NAME

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* Find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}
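
/*
 * For illustration, DEFINE_NAME("root") above expands to one class name per
 * tree level:
 *
 *	.names[0] = "btrfs-root-00",
 *	.names[1] = "btrfs-root-01",
 *	...
 *	.names[7] = "btrfs-root-07",
 *
 * which is the name lockdep reports when it prints one of these locks.
 */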

#endif

/*
 * Compute the csum of a btree block and store the result in the provided
 * buffer.
 */
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;
	const int num_pages = num_extent_pages(buf);
	const int first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;
	int i;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	kaddr = page_address(buf->pages[0]) + offset_in_page(buf->start);
	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
			    first_page_part - BTRFS_CSUM_SIZE);

	for (i = 1; i < num_pages; i++) {
		kaddr = page_address(buf->pages[i]);
		crypto_shash_update(shash, kaddr, PAGE_SIZE);
	}
	memset(result, 0, BTRFS_CSUM_SIZE);
	crypto_shash_final(shash, result);
}

/*
 * We can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written in
 * the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;
	clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state);
	return ret;
}

static bool btrfs_supported_super_csum(u16 csum_type)
{
	switch (csum_type) {
	case BTRFS_CSUM_TYPE_CRC32:
	case BTRFS_CSUM_TYPE_XXHASH:
	case BTRFS_CSUM_TYPE_SHA256:
	case BTRFS_CSUM_TYPE_BLAKE2:
		return true;
	default:
		return false;
	}
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of
 * that algorithm.  Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	char result[BTRFS_CSUM_SIZE];
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);

	shash->tfm = fs_info->csum_shash;

	/*
	 * The super_block structure does not span the whole
	 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
	 * filled with zeros and is included in the checksum.
	 */
	crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);

	if (memcmp(disk_sb->csum, result, fs_info->csum_size))
		return 1;

	return 0;
}

int btrfs_verify_level_key(struct extent_buffer *eb, int level,
			   struct btrfs_key *first_key, u64 parent_transid)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int found_level;
	struct btrfs_key found_key;
	int ret;

	found_level = btrfs_header_level(eb);
	if (found_level != level) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
		     KERN_ERR "BTRFS: tree level check failed\n");
		btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
			  eb->start, level, found_level);
		return -EIO;
	}

	if (!first_key)
		return 0;

	/*
	 * For live tree blocks (new tree blocks in the current transaction),
	 * we need proper lock context to avoid races, which is impossible
	 * here.  So we only check tree blocks which are read from disk,
	 * whose generation <= fs_info->last_trans_committed.
	 */
	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
		return 0;

	/* We have @first_key, so this @eb must have at least one item */
	if (btrfs_header_nritems(eb) == 0) {
		btrfs_err(fs_info,
		"invalid tree nritems, bytenr=%llu nritems=0 expect >0",
			  eb->start);
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		return -EUCLEAN;
	}

	if (found_level)
		btrfs_node_key_to_cpu(eb, &found_key, 0);
	else
		btrfs_item_key_to_cpu(eb, &found_key, 0);
	ret = btrfs_comp_cpu_keys(first_key, &found_key);

	if (ret) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
		     KERN_ERR "BTRFS: tree first key check failed\n");
		btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
			  eb->start, parent_transid, first_key->objectid,
			  first_key->type, first_key->offset,
			  found_key.objectid, found_key.type,
			  found_key.offset);
	}
	return ret;
}
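
/*
 * Example of the retry flow implemented by btree_read_extent_buffer_pages()
 * below (a walkthrough of the loop, not extra behavior): with num_copies ==
 * 2, if the first read is served by mirror 1 and fails verification,
 * failed_mirror is recorded as 1, mirror_num advances past it to 2, and a
 * successful read from mirror 2 then triggers btrfs_repair_eb_io_failure()
 * to rewrite the bad copy.
 */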

/*
 * Helper to read a given tree block, doing retries as required when the
 * checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid:	expected transid, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key of first slot, skip check if NULL
 */
static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
					  u64 parent_transid, int level,
					  struct btrfs_key *first_key)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
		if (!ret) {
			if (verify_parent_transid(io_tree, eb,
						  parent_transid, 0))
				ret = -EIO;
			else if (btrfs_verify_level_key(eb, level,
						first_key, parent_transid))
				ret = -EUCLEAN;
			else
				break;
		}

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		btrfs_repair_eb_io_failure(eb, failed_mirror);

	return ret;
}

static int csum_one_extent_buffer(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	u8 result[BTRFS_CSUM_SIZE];
	int ret;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
				    offsetof(struct btrfs_header, fsid),
				    BTRFS_FSID_SIZE) == 0);
	csum_tree_block(eb, result);

	if (btrfs_header_level(eb))
		ret = btrfs_check_node(eb);
	else
		ret = btrfs_check_leaf_full(eb);

	if (ret < 0) {
		btrfs_print_tree(eb, 0);
		btrfs_err(fs_info,
			"block=%llu write time tree block corruption detected",
			eb->start);
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		return ret;
	}
	write_extent_buffer(eb, result, 0, fs_info->csum_size);

	return 0;
}

/* Checksum all dirty extent buffers in one bio_vec */
static int csum_dirty_subpage_buffers(struct btrfs_fs_info *fs_info,
				      struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	u64 bvec_start = page_offset(page) + bvec->bv_offset;
	u64 cur;
	int ret = 0;

	for (cur = bvec_start; cur < bvec_start + bvec->bv_len;
	     cur += fs_info->nodesize) {
		struct extent_buffer *eb;
		bool uptodate;

		eb = find_extent_buffer(fs_info, cur);
		uptodate = btrfs_subpage_test_uptodate(fs_info, page, cur,
						       fs_info->nodesize);

		/* A dirty eb shouldn't disappear from buffer_radix */
		if (WARN_ON(!eb))
			return -EUCLEAN;

		if (WARN_ON(cur != btrfs_header_bytenr(eb))) {
			free_extent_buffer(eb);
			return -EUCLEAN;
		}
		if (WARN_ON(!uptodate)) {
			free_extent_buffer(eb);
			return -EUCLEAN;
		}

		ret = csum_one_extent_buffer(eb);
		free_extent_buffer(eb);
		if (ret < 0)
			return ret;
	}
	return ret;
}
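
/*
 * Subpage note (illustrative numbers): with 64K pages and 16K nodesize, a
 * single bio_vec can cover up to four extent buffers, which is why the
 * loop above walks the bvec in nodesize strides and checksums each buffer
 * independently.
 */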

/*
 * Checksum a dirty tree block before IO.  This has extra checks to make
 * sure we only fill in the checksum field in the first page of a multi-page
 * block.  For subpage extent buffers we need the bvec to get the offset
 * within the page as well.
 */
static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	if (fs_info->sectorsize < PAGE_SIZE)
		return csum_dirty_subpage_buffers(fs_info, bvec);

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);

	if (test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags)) {
		WARN_ON(found_start != 0);
		return 0;
	}

	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	return csum_one_extent_buffer(eb);
}

static int check_tree_block_fsid(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	u8 fsid[BTRFS_FSID_SIZE];
	u8 *metadata_uuid;

	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
			   BTRFS_FSID_SIZE);
	/*
	 * Checking the incompat flag is only valid for the current fs.  For
	 * seed devices it's forbidden to have their uuid changed so reading
	 * ->fsid in this case is fine.
	 */
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
		metadata_uuid = fs_devices->metadata_uuid;
	else
		metadata_uuid = fs_devices->fsid;

	if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE))
		return 0;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
		if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
			return 0;

	return 1;
}

/* Do basic extent buffer checks at read time */
static int validate_extent_buffer(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	u64 found_start;
	const u32 csum_size = fs_info->csum_size;
	u8 found_level;
	u8 result[BTRFS_CSUM_SIZE];
	const u8 *header_csum;
	int ret = 0;

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
			     eb->start, found_start);
		ret = -EIO;
		goto out;
	}
	if (check_tree_block_fsid(eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto out;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d on %llu",
			  (int)btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto out;
	}

	csum_tree_block(eb, result);
	header_csum = page_address(eb->pages[0]) +
		get_eb_offset_in_page(eb, offsetof(struct btrfs_header, csum));

	if (memcmp(result, header_csum, csum_size) != 0) {
		btrfs_warn_rl(fs_info,
	"checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d",
			      eb->start,
			      CSUM_FMT_VALUE(csum_size, header_csum),
			      CSUM_FMT_VALUE(csum_size, result),
			      btrfs_header_level(eb));
		ret = -EUCLEAN;
		goto out;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf_full(eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
	else
		btrfs_err(fs_info,
			  "block=%llu read time tree block corruption detected",
			  eb->start);
out:
	return ret;
}

static int validate_subpage_buffer(struct page *page, u64 start, u64 end,
				   int mirror)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	struct extent_buffer *eb;
	bool reads_done;
	int ret = 0;

	/*
	 * We don't allow bio merge for subpage metadata read, so we should
	 * only get one eb for each endio hook.
	 */
	ASSERT(end == start + fs_info->nodesize - 1);
	ASSERT(PagePrivate(page));

	eb = find_extent_buffer(fs_info, start);
	/*
	 * When we are reading one tree block, eb must have been inserted into
	 * the radix tree.  If not, something is wrong.
	 */
	ASSERT(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	/* Subpage read must finish in page read */
	ASSERT(reads_done);

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}
	ret = validate_extent_buffer(eb);
	if (ret < 0)
		goto err;

	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	set_extent_buffer_uptodate(eb);

	free_extent_buffer(eb);
	return ret;
err:
	/*
	 * end_bio_extent_readpage decrements io_pages in case of error,
	 * make sure it has something to decrement.
	 */
	atomic_inc(&eb->io_pages);
	clear_extent_buffer_uptodate(eb);
	free_extent_buffer(eb);
	return ret;
}
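
/*
 * btrfs_validate_metadata_buffer() below is the common entry point for read
 * completion: on subpage filesystems (sectorsize < PAGE_SIZE) it hands the
 * work to validate_subpage_buffer() above, otherwise the page belongs to a
 * single extent buffer which is validated directly.  (A summary of the
 * dispatch, not extra behavior.)
 */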
int btrfs_validate_metadata_buffer(struct btrfs_bio *bbio,
				   struct page *page, u64 start, u64 end,
				   int mirror)
{
	struct extent_buffer *eb;
	int ret = 0;
	int reads_done;

	ASSERT(page->private);

	if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
		return validate_subpage_buffer(page, start, end, mirror);

	eb = (struct extent_buffer *)page->private;

	/*
	 * The pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks.
	 */
	atomic_inc(&eb->refs);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}
	ret = validate_extent_buffer(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * Our io error hook is going to dec the io pages again, we
		 * have to make sure it has something to decrement.
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);

	return ret;
}

static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
			wq = fs_info->endio_meta_write_workers;
		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
			wq = fs_info->endio_freespace_worker;
		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
			wq = fs_info->endio_raid56_workers;
		else
			wq = fs_info->endio_write_workers;
	} else {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
			wq = fs_info->endio_raid56_workers;
		else if (end_io_wq->metadata)
			wq = fs_info->endio_meta_workers;
		else
			wq = fs_info->endio_workers;
	}

	btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
				 enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->inode, async->bio,
				      async->dio_file_offset);
	if (ret)
		async->status = ret;
}
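
/*
 * How the three async work callbacks fit together, as wired up in
 * btrfs_wq_submit_bio() further down (a summary of the code, not extra
 * behavior):
 *
 *   run_one_async_start() - checksums the bio via submit_bio_start
 *   run_one_async_done()  - maps and submits the bio, or ends it on error
 *   run_one_async_free()  - frees the async_submit_bio
 */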

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time.  All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 */
static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	struct inode *inode;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	inode = async->inode;

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	/*
	 * All of the bios that pass through here are from async helpers.
	 * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
	 * This changes nothing when cgroups aren't in use.
	 */
	async->bio->bi_opf |= REQ_CGROUP_PUNT;
	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
	if (ret) {
		async->bio->bi_status = ret;
		bio_endio(async->bio);
	}
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 dio_file_offset,
				 extent_submit_bio_start_t *submit_bio_start)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->inode = inode;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;

	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
			run_one_async_free);

	async->dio_file_offset = dio_file_offset;

	async->status = 0;

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);
	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int ret = 0;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, iter_all) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t btree_submit_bio_start(struct inode *inode, struct bio *bio,
					   u64 dio_file_offset)
{
	/*
	 * When we're called for a write, we're already in the async
	 * submission context; the bio is mapped and submitted later by
	 * run_one_async_done(), so only do the checksumming here.
	 */
	return btree_csum_one_bio(bio);
}

static bool should_async_write(struct btrfs_fs_info *fs_info,
			       struct btrfs_inode *bi)
{
	if (btrfs_is_zoned(fs_info))
		return false;
	if (atomic_read(&bi->sync_writers))
		return false;
	if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
		return false;
	return true;
}

blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
				       int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	blk_status_t ret;

	if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
		/*
		 * Called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads.
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num);
	} else if (!should_async_write(fs_info, BTRFS_I(inode))) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num);
	} else {
		/*
		 * Kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs.
		 */
		ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
					  0, btree_submit_bio_start);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}
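
/*
 * Summary of the metadata bio submission paths above (a condensed
 * restatement of btrfs_submit_metadata_bio(), not extra behavior):
 *
 *   read                       -> wq end_io hook, then btrfs_map_bio()
 *   write, !should_async_write -> checksum inline, then btrfs_map_bio()
 *   write, should_async_write  -> btrfs_wq_submit_bio() (async checksum)
 */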

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			     struct page *newpage, struct page *page,
			     enum migrate_mode mode)
{
	/*
	 * We can't safely write a btree page from here,
	 * we haven't done the locking hook.
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* This is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					       BTRFS_DIRTY_METADATA_THRESH,
					       fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		detach_page_private(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	struct btrfs_subpage *subpage;
	struct extent_buffer *eb;
	int cur_bit = 0;
	u64 page_start = page_offset(page);

	if (fs_info->sectorsize == PAGE_SIZE) {
		BUG_ON(!PagePrivate(page));
		eb = (struct extent_buffer *)page->private;
		BUG_ON(!eb);
		BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
		BUG_ON(!atomic_read(&eb->refs));
		btrfs_assert_tree_write_locked(eb);
		return __set_page_dirty_nobuffers(page);
	}
	ASSERT(PagePrivate(page) && page->private);
	subpage = (struct btrfs_subpage *)page->private;

	ASSERT(subpage->dirty_bitmap);
	while (cur_bit < BTRFS_SUBPAGE_BITMAP_SIZE) {
		unsigned long flags;
		u64 cur;
		u16 tmp = (1 << cur_bit);

		spin_lock_irqsave(&subpage->lock, flags);
		if (!(tmp & subpage->dirty_bitmap)) {
			spin_unlock_irqrestore(&subpage->lock, flags);
			cur_bit++;
			continue;
		}
		spin_unlock_irqrestore(&subpage->lock, flags);
		cur = page_start + cur_bit * fs_info->sectorsize;

		eb = find_extent_buffer(fs_info, cur);
		ASSERT(eb);
		ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
		ASSERT(atomic_read(&eb->refs));
		btrfs_assert_tree_write_locked(eb);
		free_extent_buffer(eb);

		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits);
	}
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr, u64 owner_root,
						int level)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr, owner_root, level);
}

/*
 * Read a tree block at logical address @bytenr and do various basic but
 * critical verifications.
 *
 * @owner_root:		the objectid of the root owner for this block.
 * @parent_transid:	expected transid of this tree block, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 owner_root, u64 parent_transid,
				      int level, struct btrfs_key *first_key)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(buf, parent_transid,
					     level, first_key);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ERR_PTR(ret);
	}
	return buf;
}

void btrfs_clean_tree_block(struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;

	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_write_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);

	root->fs_info = fs_info;
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->last_trans = 0;
	root->free_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->reloc_dirty_list);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->qgroup_flush_wait);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->snapshot_force_cow, 0);
	atomic_set(&root->nr_swapfiles, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy) {
		extent_io_tree_init(fs_info, &root->dirty_log_pages,
				    IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);
		extent_io_tree_init(fs_info, &root->log_csum_range,
				    IO_TREE_LOG_CSUM_RANGE, NULL);
	}

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
#ifdef CONFIG_BTRFS_DEBUG
	INIT_LIST_HEAD(&root->leak_list);
	spin_lock(&fs_info->fs_roots_radix_lock);
	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
	spin_unlock(&fs_info->fs_roots_radix_lock);
#endif
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
					   u64 objectid, gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);

	if (root)
		__setup_root(root, fs_info, objectid);
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	root->alloc_bytenr = 0;

	return root;
}
#endif
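
/*
 * btrfs_create_tree() below, in outline (a summary of the function, not
 * extra behavior): allocate an in-memory root for @objectid, allocate and
 * dirty its empty root node, fill in the root item and insert it into the
 * tree of tree roots; see the code for the error handling details.
 */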
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     u64 objectid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	int ret = 0;

	/*
	 * We're holding a transaction handle, so use a NOFS memory allocation
	 * context to avoid deadlock if reclaim happens.
	 */
	nofs_flag = memalloc_nofs_save();
	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!root)
		return ERR_PTR(-ENOMEM);

	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
				      BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail_unlock;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	btrfs_set_root_flags(&root->root_item, 0);
	btrfs_set_root_limit(&root->root_item, 0);
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		generate_random_guid(root->root_item.uuid);
	else
		export_guid(root->root_item.uuid, &guid_null);
	btrfs_set_root_drop_level(&root->root_item, 0);

	btrfs_tree_unlock(leaf);

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	return root;

fail_unlock:
	if (leaf)
		btrfs_tree_unlock(leaf);
fail:
	btrfs_put_root(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	return root;
}

int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	struct extent_buffer *leaf;

	/*
	 * DON'T set SHAREABLE bit for log trees.
	 *
	 * Log trees are not exposed to user space thus can't be snapshotted,
	 * and they go away before a real commit is actually done.
	 *
	 * They do store pointers to file data extents, and those reference
	 * counts still get updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0, BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf))
		return PTR_ERR(leaf);

	root->node = leaf;

	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);

	return 0;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	if (!btrfs_is_zoned(fs_info)) {
		int ret = btrfs_alloc_log_tree_node(trans, log_root);

		if (ret) {
			btrfs_put_root(log_root);
			return ret;
		}
	}

	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;
	int ret;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	ret = btrfs_alloc_log_tree_node(trans, log_root);
	if (ret) {
		btrfs_put_root(log_root);
		return ret;
	}

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
					      struct btrfs_path *path,
					      struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	u64 generation;
	int ret;
	int level;

	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     key->objectid, generation, level, NULL);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		root->node = NULL;
		goto fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		goto fail;
	}
	root->commit_root = btrfs_root_node(root);
	return root;
fail:
	btrfs_put_root(root);
	return ERR_PTR(ret);
}

struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);
	root = read_tree_root_path(tree_root, path, key);
	btrfs_free_path(path);

	return root;
}

/*
 * Initialize subvolume root in-memory structure.
 *
 * @anon_dev:	anonymous device to attach to the root; if zero, allocate a
 *		new one
 */
static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
{
	int ret;
	unsigned int nofs_flag;

	/*
	 * We might be called under a transaction (e.g. indirect backref
	 * resolution) which could deadlock if it triggers memory reclaim.
	 */
	nofs_flag = memalloc_nofs_save();
	ret = btrfs_drew_lock_init(&root->snapshot_lock);
	memalloc_nofs_restore(nofs_flag);
	if (ret)
		goto fail;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
	    !btrfs_is_data_reloc_root(root)) {
		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	/*
	 * Don't assign an anonymous block device to roots that are not
	 * exposed to userspace, as the id pool is limited to 1M.
	 */
	if (is_fstree(root->root_key.objectid) &&
	    btrfs_root_refs(&root->root_item) > 0) {
		if (!anon_dev) {
			ret = get_anon_bdev(&root->anon_dev);
			if (ret)
				goto fail;
		} else {
			root->anon_dev = anon_dev;
		}
	}

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_init_root_free_objectid(root);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* The caller is responsible for calling btrfs_free_fs_root() */
	return ret;
}

static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					       u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	if (root)
		root = btrfs_grab_root(root);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
						u64 objectid)
{
	if (objectid == BTRFS_ROOT_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->tree_root);
	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->extent_root);
	if (objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->chunk_root);
	if (objectid == BTRFS_DEV_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->dev_root);
	if (objectid == BTRFS_CSUM_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->csum_root);
	if (objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->quota_root) ?
			fs_info->quota_root : ERR_PTR(-ENOENT);
	if (objectid == BTRFS_UUID_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->uuid_root) ?
			fs_info->uuid_root : ERR_PTR(-ENOENT);
	if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->free_space_root) ?
			fs_info->free_space_root : ERR_PTR(-ENOENT);
	return NULL;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0) {
		btrfs_grab_root(root);
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_DEBUG
	struct btrfs_root *root;

	while (!list_empty(&fs_info->allocated_roots)) {
		char buf[BTRFS_ROOT_NAME_BUF_LEN];

		root = list_first_entry(&fs_info->allocated_roots,
					struct btrfs_root, leak_list);
		btrfs_err(fs_info, "leaked root %s refcount %d",
			  btrfs_root_name(&root->root_key, buf),
			  refcount_read(&root->refs));
		while (refcount_read(&root->refs) > 1)
			btrfs_put_root(root);
		btrfs_put_root(root);
	}
#endif
}

void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	percpu_counter_destroy(&fs_info->ordered_bytes);
	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
	btrfs_free_csum_hash(fs_info);
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_free_ref_cache(fs_info);
	kfree(fs_info->balance_ctl);
	kfree(fs_info->delayed_root);
	btrfs_put_root(fs_info->extent_root);
	btrfs_put_root(fs_info->tree_root);
	btrfs_put_root(fs_info->chunk_root);
	btrfs_put_root(fs_info->dev_root);
	btrfs_put_root(fs_info->csum_root);
	btrfs_put_root(fs_info->quota_root);
	btrfs_put_root(fs_info->uuid_root);
	btrfs_put_root(fs_info->free_space_root);
	btrfs_put_root(fs_info->fs_root);
	btrfs_put_root(fs_info->data_reloc_root);
	btrfs_check_leaked_roots(fs_info);
	btrfs_extent_buffer_leak_debug_check(fs_info);
	kfree(fs_info->super_copy);
	kfree(fs_info->super_for_commit);
	kfree(fs_info->subpage_info);
	kvfree(fs_info);
}
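
/*
 * Typical lookup/release pattern for the root accessors below (an
 * illustrative sketch, not taken from a specific caller):
 *
 *	root = btrfs_get_fs_root(fs_info, objectid, true);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *	...
 *	btrfs_put_root(root);
 */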

/*
 * Get an in-memory reference of a root structure.
 *
 * For essential trees like the root/extent tree, we grab it from fs_info
 * directly.  For subvolume trees, we check the cached filesystem roots
 * first.  If not found, then read it from disk and add it to the cached
 * fs roots.
 *
 * The caller should release the root by calling btrfs_put_root() after the
 * usage.
 *
 * NOTE: Reloc and log trees can't be read by this function as they share
 * the same root objectid.
 *
 * @objectid:	root id
 * @anon_dev:	preallocated anonymous block device number for new roots,
 *		pass 0 for a new allocation.
 * @check_ref:	whether to check root item references; if true, return
 *		-ENOENT for orphan roots
 */
static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
					     u64 objectid, dev_t anon_dev,
					     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;
again:
	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root) {
		/* Shouldn't get preallocated anon_dev for cached roots */
		ASSERT(!anon_dev);
		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
			btrfs_put_root(root);
			return ERR_PTR(-ENOENT);
		}
		return root;
	}

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_tree_root(fs_info->tree_root, &key);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root, anon_dev);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		btrfs_put_root(root);
		if (ret == -EEXIST)
			goto again;
		goto fail;
	}
	return root;
fail:
	/*
	 * If our caller provided us an anonymous device, then it's their
	 * responsibility to free it in case we fail.  So we have to set our
	 * root's anon_dev to 0 to avoid a double free, once by
	 * btrfs_put_root() and once again by our caller.
	 */
	if (anon_dev)
		root->anon_dev = 0;
	btrfs_put_root(root);
	return ERR_PTR(ret);
}

/*
 * Get an in-memory reference of a root structure.
 *
 * @objectid:	tree objectid
 * @check_ref:	if set, verify that the tree exists and the item has at
 *		least one reference
 */
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     u64 objectid, bool check_ref)
{
	return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
}

/*
 * Get an in-memory reference of a root structure, created as new, optionally
 * passing the anonymous block device id.
 *
 * @objectid:	tree objectid
 * @anon_dev:	anonymous block device number to use; if zero, allocate a
 *		new one
 */
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
					 u64 objectid, dev_t anon_dev)
{
	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
}

/*
 * btrfs_get_fs_root_commit_root - return a root for the given objectid
 * @fs_info:	the fs_info
 * @objectid:	the objectid we need to look up
 *
 * This is exclusively used for backref walking, and exists specifically
 * because of how qgroups do lookups.  Qgroups will do a backref lookup at
 * delayed ref creation time, which means we may have to read the tree_root
 * in order to look up a fs root that is not in memory.  If the root is not
 * in memory we will read the tree root commit root and look up the fs root
 * from there.  This is a temporary root, it will not be inserted into the
 * radix tree as it doesn't have the most up-to-date information, it'll
 * simply be discarded once the backref code is finished using the root.
 */
struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_path *path,
						 u64 objectid)
{
	struct btrfs_root *root;
	struct btrfs_key key;

	ASSERT(path->search_commit_root && path->skip_locking);

	/*
	 * This can return -ENOENT if we ask for a root that doesn't exist,
	 * but since this is called via the backref walking code we won't be
	 * looking up a root that doesn't exist, unless there's corruption.
	 * So if root != NULL just return it.
	 */
	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;

	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root)
		return root;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = read_tree_root_path(fs_info->tree_root, path, &key);
	btrfs_release_path(path);

	return root;
}

/*
 * Called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens.
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	bio_endio(bio);
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
}
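
/*
 * The cleaner kthread below, in one pass of its main loop (a summary of
 * the code that follows): run delayed iputs, clean one deleted snapshot,
 * kick inode defrag, delete unused block groups and reclaim block groups
 * from the reclaim_bgs list, then sleep until woken.
 */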
1891 */ 1892 btrfs_delete_unused_bgs(fs_info); 1893 1894 /* 1895 * Reclaim block groups in the reclaim_bgs list after we deleted 1896 * all unused block_groups. This possibly gives us some more free 1897 * space. 1898 */ 1899 btrfs_reclaim_bgs(fs_info); 1900 sleep: 1901 clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); 1902 if (kthread_should_park()) 1903 kthread_parkme(); 1904 if (kthread_should_stop()) 1905 return 0; 1906 if (!again) { 1907 set_current_state(TASK_INTERRUPTIBLE); 1908 schedule(); 1909 __set_current_state(TASK_RUNNING); 1910 } 1911 } 1912 } 1913 1914 static int transaction_kthread(void *arg) 1915 { 1916 struct btrfs_root *root = arg; 1917 struct btrfs_fs_info *fs_info = root->fs_info; 1918 struct btrfs_trans_handle *trans; 1919 struct btrfs_transaction *cur; 1920 u64 transid; 1921 time64_t delta; 1922 unsigned long delay; 1923 bool cannot_commit; 1924 1925 do { 1926 cannot_commit = false; 1927 delay = msecs_to_jiffies(fs_info->commit_interval * 1000); 1928 mutex_lock(&fs_info->transaction_kthread_mutex); 1929 1930 spin_lock(&fs_info->trans_lock); 1931 cur = fs_info->running_transaction; 1932 if (!cur) { 1933 spin_unlock(&fs_info->trans_lock); 1934 goto sleep; 1935 } 1936 1937 delta = ktime_get_seconds() - cur->start_time; 1938 if (cur->state < TRANS_STATE_COMMIT_START && 1939 delta < fs_info->commit_interval) { 1940 spin_unlock(&fs_info->trans_lock); 1941 delay -= msecs_to_jiffies((delta - 1) * 1000); 1942 delay = min(delay, 1943 msecs_to_jiffies(fs_info->commit_interval * 1000)); 1944 goto sleep; 1945 } 1946 transid = cur->transid; 1947 spin_unlock(&fs_info->trans_lock); 1948 1949 /* If the file system is aborted, this will always fail. */ 1950 trans = btrfs_attach_transaction(root); 1951 if (IS_ERR(trans)) { 1952 if (PTR_ERR(trans) != -ENOENT) 1953 cannot_commit = true; 1954 goto sleep; 1955 } 1956 if (transid == trans->transid) { 1957 btrfs_commit_transaction(trans); 1958 } else { 1959 btrfs_end_transaction(trans); 1960 } 1961 sleep: 1962 wake_up_process(fs_info->cleaner_kthread); 1963 mutex_unlock(&fs_info->transaction_kthread_mutex); 1964 1965 if (BTRFS_FS_ERROR(fs_info)) 1966 btrfs_cleanup_transaction(fs_info); 1967 if (!kthread_should_stop() && 1968 (!btrfs_transaction_blocked(fs_info) || 1969 cannot_commit)) 1970 schedule_timeout_interruptible(delay); 1971 } while (!kthread_should_stop()); 1972 return 0; 1973 } 1974 1975 /* 1976 * This will find the highest generation in the array of root backups. The 1977 * index of the highest array is returned, or -EINVAL if we can't find 1978 * anything. 1979 * 1980 * We check to make sure the array is valid by comparing the 1981 * generation of the latest root in the array with the generation 1982 * in the super block. If they don't match we pitch it. 1983 */ 1984 static int find_newest_super_backup(struct btrfs_fs_info *info) 1985 { 1986 const u64 newest_gen = btrfs_super_generation(info->super_copy); 1987 u64 cur; 1988 struct btrfs_root_backup *root_backup; 1989 int i; 1990 1991 for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { 1992 root_backup = info->super_copy->super_roots + i; 1993 cur = btrfs_backup_tree_root_gen(root_backup); 1994 if (cur == newest_gen) 1995 return i; 1996 } 1997 1998 return -EINVAL; 1999 } 2000 2001 /* 2002 * copy all the root pointers into the super backup array. 
2003 * this will bump the backup pointer by one when it is 2004 * done 2005 */ 2006 static void backup_super_roots(struct btrfs_fs_info *info) 2007 { 2008 const int next_backup = info->backup_root_index; 2009 struct btrfs_root_backup *root_backup; 2010 2011 root_backup = info->super_for_commit->super_roots + next_backup; 2012 2013 /* 2014 * make sure all of our padding and empty slots get zero filled 2015 * regardless of which ones we use today 2016 */ 2017 memset(root_backup, 0, sizeof(*root_backup)); 2018 2019 info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS; 2020 2021 btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start); 2022 btrfs_set_backup_tree_root_gen(root_backup, 2023 btrfs_header_generation(info->tree_root->node)); 2024 2025 btrfs_set_backup_tree_root_level(root_backup, 2026 btrfs_header_level(info->tree_root->node)); 2027 2028 btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start); 2029 btrfs_set_backup_chunk_root_gen(root_backup, 2030 btrfs_header_generation(info->chunk_root->node)); 2031 btrfs_set_backup_chunk_root_level(root_backup, 2032 btrfs_header_level(info->chunk_root->node)); 2033 2034 btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start); 2035 btrfs_set_backup_extent_root_gen(root_backup, 2036 btrfs_header_generation(info->extent_root->node)); 2037 btrfs_set_backup_extent_root_level(root_backup, 2038 btrfs_header_level(info->extent_root->node)); 2039 2040 /* 2041 * we might commit during log recovery, which happens before we set 2042 * the fs_root. Make sure it is valid before we fill it in. 2043 */ 2044 if (info->fs_root && info->fs_root->node) { 2045 btrfs_set_backup_fs_root(root_backup, 2046 info->fs_root->node->start); 2047 btrfs_set_backup_fs_root_gen(root_backup, 2048 btrfs_header_generation(info->fs_root->node)); 2049 btrfs_set_backup_fs_root_level(root_backup, 2050 btrfs_header_level(info->fs_root->node)); 2051 } 2052 2053 btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); 2054 btrfs_set_backup_dev_root_gen(root_backup, 2055 btrfs_header_generation(info->dev_root->node)); 2056 btrfs_set_backup_dev_root_level(root_backup, 2057 btrfs_header_level(info->dev_root->node)); 2058 2059 btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start); 2060 btrfs_set_backup_csum_root_gen(root_backup, 2061 btrfs_header_generation(info->csum_root->node)); 2062 btrfs_set_backup_csum_root_level(root_backup, 2063 btrfs_header_level(info->csum_root->node)); 2064 2065 btrfs_set_backup_total_bytes(root_backup, 2066 btrfs_super_total_bytes(info->super_copy)); 2067 btrfs_set_backup_bytes_used(root_backup, 2068 btrfs_super_bytes_used(info->super_copy)); 2069 btrfs_set_backup_num_devices(root_backup, 2070 btrfs_super_num_devices(info->super_copy)); 2071 2072 /* 2073 * if we don't copy this out to the super_copy, it won't get remembered 2074 * for the next commit 2075 */ 2076 memcpy(&info->super_copy->super_roots, 2077 &info->super_for_commit->super_roots, 2078 sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); 2079 } 2080 2081 /* 2082 * read_backup_root - Reads a backup root based on the passed priority. Prio 0 2083 * is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots 2084 * 2085 * fs_info - filesystem whose backup roots need to be read 2086 * priority - priority of backup root required 2087 * 2088 * Returns backup root index on success and -EINVAL otherwise. 
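 *
 * For illustration, assuming BTRFS_NUM_BACKUP_ROOTS == 4: with the newest
 * backup at index 2, priority 1 maps to (2 + 4 - 1) % 4 = 1, priority 2 to
 * (2 + 4 - 2) % 4 = 0 and priority 3 to (2 + 4 - 3) % 4 = 3, i.e. we walk
 * backwards from the newest copy to the oldest.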
2089 */ 2090 static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority) 2091 { 2092 int backup_index = find_newest_super_backup(fs_info); 2093 struct btrfs_super_block *super = fs_info->super_copy; 2094 struct btrfs_root_backup *root_backup; 2095 2096 if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) { 2097 if (priority == 0) 2098 return backup_index; 2099 2100 backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority; 2101 backup_index %= BTRFS_NUM_BACKUP_ROOTS; 2102 } else { 2103 return -EINVAL; 2104 } 2105 2106 root_backup = super->super_roots + backup_index; 2107 2108 btrfs_set_super_generation(super, 2109 btrfs_backup_tree_root_gen(root_backup)); 2110 btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); 2111 btrfs_set_super_root_level(super, 2112 btrfs_backup_tree_root_level(root_backup)); 2113 btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); 2114 2115 /* 2116 * Fixme: the total bytes and num_devices need to match or we should 2117 * need a fsck 2118 */ 2119 btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); 2120 btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); 2121 2122 return backup_index; 2123 } 2124 2125 /* helper to cleanup workers */ 2126 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) 2127 { 2128 btrfs_destroy_workqueue(fs_info->fixup_workers); 2129 btrfs_destroy_workqueue(fs_info->delalloc_workers); 2130 btrfs_destroy_workqueue(fs_info->workers); 2131 btrfs_destroy_workqueue(fs_info->endio_workers); 2132 btrfs_destroy_workqueue(fs_info->endio_raid56_workers); 2133 btrfs_destroy_workqueue(fs_info->rmw_workers); 2134 btrfs_destroy_workqueue(fs_info->endio_write_workers); 2135 btrfs_destroy_workqueue(fs_info->endio_freespace_worker); 2136 btrfs_destroy_workqueue(fs_info->delayed_workers); 2137 btrfs_destroy_workqueue(fs_info->caching_workers); 2138 btrfs_destroy_workqueue(fs_info->readahead_workers); 2139 btrfs_destroy_workqueue(fs_info->flush_workers); 2140 btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); 2141 if (fs_info->discard_ctl.discard_workers) 2142 destroy_workqueue(fs_info->discard_ctl.discard_workers); 2143 /* 2144 * Now that all other work queues are destroyed, we can safely destroy 2145 * the queues used for metadata I/O, since tasks from those other work 2146 * queues can do metadata I/O operations. 
2147 */ 2148 btrfs_destroy_workqueue(fs_info->endio_meta_workers); 2149 btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); 2150 } 2151 2152 static void free_root_extent_buffers(struct btrfs_root *root) 2153 { 2154 if (root) { 2155 free_extent_buffer(root->node); 2156 free_extent_buffer(root->commit_root); 2157 root->node = NULL; 2158 root->commit_root = NULL; 2159 } 2160 } 2161 2162 /* helper to cleanup tree roots */ 2163 static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root) 2164 { 2165 free_root_extent_buffers(info->tree_root); 2166 2167 free_root_extent_buffers(info->dev_root); 2168 free_root_extent_buffers(info->extent_root); 2169 free_root_extent_buffers(info->csum_root); 2170 free_root_extent_buffers(info->quota_root); 2171 free_root_extent_buffers(info->uuid_root); 2172 free_root_extent_buffers(info->fs_root); 2173 free_root_extent_buffers(info->data_reloc_root); 2174 if (free_chunk_root) 2175 free_root_extent_buffers(info->chunk_root); 2176 free_root_extent_buffers(info->free_space_root); 2177 } 2178 2179 void btrfs_put_root(struct btrfs_root *root) 2180 { 2181 if (!root) 2182 return; 2183 2184 if (refcount_dec_and_test(&root->refs)) { 2185 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); 2186 WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)); 2187 if (root->anon_dev) 2188 free_anon_bdev(root->anon_dev); 2189 btrfs_drew_lock_destroy(&root->snapshot_lock); 2190 free_root_extent_buffers(root); 2191 #ifdef CONFIG_BTRFS_DEBUG 2192 spin_lock(&root->fs_info->fs_roots_radix_lock); 2193 list_del_init(&root->leak_list); 2194 spin_unlock(&root->fs_info->fs_roots_radix_lock); 2195 #endif 2196 kfree(root); 2197 } 2198 } 2199 2200 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) 2201 { 2202 int ret; 2203 struct btrfs_root *gang[8]; 2204 int i; 2205 2206 while (!list_empty(&fs_info->dead_roots)) { 2207 gang[0] = list_entry(fs_info->dead_roots.next, 2208 struct btrfs_root, root_list); 2209 list_del(&gang[0]->root_list); 2210 2211 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) 2212 btrfs_drop_and_free_fs_root(fs_info, gang[0]); 2213 btrfs_put_root(gang[0]); 2214 } 2215 2216 while (1) { 2217 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, 2218 (void **)gang, 0, 2219 ARRAY_SIZE(gang)); 2220 if (!ret) 2221 break; 2222 for (i = 0; i < ret; i++) 2223 btrfs_drop_and_free_fs_root(fs_info, gang[i]); 2224 } 2225 } 2226 2227 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) 2228 { 2229 mutex_init(&fs_info->scrub_lock); 2230 atomic_set(&fs_info->scrubs_running, 0); 2231 atomic_set(&fs_info->scrub_pause_req, 0); 2232 atomic_set(&fs_info->scrubs_paused, 0); 2233 atomic_set(&fs_info->scrub_cancel_req, 0); 2234 init_waitqueue_head(&fs_info->scrub_pause_wait); 2235 refcount_set(&fs_info->scrub_workers_refcnt, 0); 2236 } 2237 2238 static void btrfs_init_balance(struct btrfs_fs_info *fs_info) 2239 { 2240 spin_lock_init(&fs_info->balance_lock); 2241 mutex_init(&fs_info->balance_mutex); 2242 atomic_set(&fs_info->balance_pause_req, 0); 2243 atomic_set(&fs_info->balance_cancel_req, 0); 2244 fs_info->balance_ctl = NULL; 2245 init_waitqueue_head(&fs_info->balance_wait_q); 2246 atomic_set(&fs_info->reloc_cancel_req, 0); 2247 } 2248 2249 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info) 2250 { 2251 struct inode *inode = fs_info->btree_inode; 2252 2253 inode->i_ino = BTRFS_BTREE_INODE_OBJECTID; 2254 set_nlink(inode, 1); 2255 /* 2256 * we set the i_size on the btree inode to the max possible int. 
2257 * the real end of the address space is determined by all of 2258 * the devices in the system 2259 */ 2260 inode->i_size = OFFSET_MAX; 2261 inode->i_mapping->a_ops = &btree_aops; 2262 2263 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); 2264 extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree, 2265 IO_TREE_BTREE_INODE_IO, inode); 2266 BTRFS_I(inode)->io_tree.track_uptodate = false; 2267 extent_map_tree_init(&BTRFS_I(inode)->extent_tree); 2268 2269 BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root); 2270 memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key)); 2271 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 2272 btrfs_insert_inode_hash(inode); 2273 } 2274 2275 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info) 2276 { 2277 mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); 2278 init_rwsem(&fs_info->dev_replace.rwsem); 2279 init_waitqueue_head(&fs_info->dev_replace.replace_wait); 2280 } 2281 2282 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info) 2283 { 2284 spin_lock_init(&fs_info->qgroup_lock); 2285 mutex_init(&fs_info->qgroup_ioctl_lock); 2286 fs_info->qgroup_tree = RB_ROOT; 2287 INIT_LIST_HEAD(&fs_info->dirty_qgroups); 2288 fs_info->qgroup_seq = 1; 2289 fs_info->qgroup_ulist = NULL; 2290 fs_info->qgroup_rescan_running = false; 2291 mutex_init(&fs_info->qgroup_rescan_lock); 2292 } 2293 2294 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, 2295 struct btrfs_fs_devices *fs_devices) 2296 { 2297 u32 max_active = fs_info->thread_pool_size; 2298 unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND; 2299 2300 fs_info->workers = 2301 btrfs_alloc_workqueue(fs_info, "worker", 2302 flags | WQ_HIGHPRI, max_active, 16); 2303 2304 fs_info->delalloc_workers = 2305 btrfs_alloc_workqueue(fs_info, "delalloc", 2306 flags, max_active, 2); 2307 2308 fs_info->flush_workers = 2309 btrfs_alloc_workqueue(fs_info, "flush_delalloc", 2310 flags, max_active, 0); 2311 2312 fs_info->caching_workers = 2313 btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0); 2314 2315 fs_info->fixup_workers = 2316 btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0); 2317 2318 /* 2319 * endios are largely parallel and should have a very 2320 * low idle thresh 2321 */ 2322 fs_info->endio_workers = 2323 btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4); 2324 fs_info->endio_meta_workers = 2325 btrfs_alloc_workqueue(fs_info, "endio-meta", flags, 2326 max_active, 4); 2327 fs_info->endio_meta_write_workers = 2328 btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags, 2329 max_active, 2); 2330 fs_info->endio_raid56_workers = 2331 btrfs_alloc_workqueue(fs_info, "endio-raid56", flags, 2332 max_active, 4); 2333 fs_info->rmw_workers = 2334 btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2); 2335 fs_info->endio_write_workers = 2336 btrfs_alloc_workqueue(fs_info, "endio-write", flags, 2337 max_active, 2); 2338 fs_info->endio_freespace_worker = 2339 btrfs_alloc_workqueue(fs_info, "freespace-write", flags, 2340 max_active, 0); 2341 fs_info->delayed_workers = 2342 btrfs_alloc_workqueue(fs_info, "delayed-meta", flags, 2343 max_active, 0); 2344 fs_info->readahead_workers = 2345 btrfs_alloc_workqueue(fs_info, "readahead", flags, 2346 max_active, 2); 2347 fs_info->qgroup_rescan_workers = 2348 btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0); 2349 fs_info->discard_ctl.discard_workers = 2350 alloc_workqueue("btrfs_discard", WQ_UNBOUND | WQ_FREEZABLE, 1); 2351 2352 if (!(fs_info->workers && 
fs_info->delalloc_workers && 2353 fs_info->flush_workers && 2354 fs_info->endio_workers && fs_info->endio_meta_workers && 2355 fs_info->endio_meta_write_workers && 2356 fs_info->endio_write_workers && fs_info->endio_raid56_workers && 2357 fs_info->endio_freespace_worker && fs_info->rmw_workers && 2358 fs_info->caching_workers && fs_info->readahead_workers && 2359 fs_info->fixup_workers && fs_info->delayed_workers && 2360 fs_info->qgroup_rescan_workers && 2361 fs_info->discard_ctl.discard_workers)) { 2362 return -ENOMEM; 2363 } 2364 2365 return 0; 2366 } 2367 2368 static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type) 2369 { 2370 struct crypto_shash *csum_shash; 2371 const char *csum_driver = btrfs_super_csum_driver(csum_type); 2372 2373 csum_shash = crypto_alloc_shash(csum_driver, 0, 0); 2374 2375 if (IS_ERR(csum_shash)) { 2376 btrfs_err(fs_info, "error allocating %s hash for checksum", 2377 csum_driver); 2378 return PTR_ERR(csum_shash); 2379 } 2380 2381 fs_info->csum_shash = csum_shash; 2382 2383 return 0; 2384 } 2385 2386 static int btrfs_replay_log(struct btrfs_fs_info *fs_info, 2387 struct btrfs_fs_devices *fs_devices) 2388 { 2389 int ret; 2390 struct btrfs_root *log_tree_root; 2391 struct btrfs_super_block *disk_super = fs_info->super_copy; 2392 u64 bytenr = btrfs_super_log_root(disk_super); 2393 int level = btrfs_super_log_root_level(disk_super); 2394 2395 if (fs_devices->rw_devices == 0) { 2396 btrfs_warn(fs_info, "log replay required on RO media"); 2397 return -EIO; 2398 } 2399 2400 log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, 2401 GFP_KERNEL); 2402 if (!log_tree_root) 2403 return -ENOMEM; 2404 2405 log_tree_root->node = read_tree_block(fs_info, bytenr, 2406 BTRFS_TREE_LOG_OBJECTID, 2407 fs_info->generation + 1, level, 2408 NULL); 2409 if (IS_ERR(log_tree_root->node)) { 2410 btrfs_warn(fs_info, "failed to read log tree"); 2411 ret = PTR_ERR(log_tree_root->node); 2412 log_tree_root->node = NULL; 2413 btrfs_put_root(log_tree_root); 2414 return ret; 2415 } else if (!extent_buffer_uptodate(log_tree_root->node)) { 2416 btrfs_err(fs_info, "failed to read log tree"); 2417 btrfs_put_root(log_tree_root); 2418 return -EIO; 2419 } 2420 /* returns with log_tree_root freed on success */ 2421 ret = btrfs_recover_log_trees(log_tree_root); 2422 if (ret) { 2423 btrfs_handle_fs_error(fs_info, ret, 2424 "Failed to recover log tree"); 2425 btrfs_put_root(log_tree_root); 2426 return ret; 2427 } 2428 2429 if (sb_rdonly(fs_info->sb)) { 2430 ret = btrfs_commit_super(fs_info); 2431 if (ret) 2432 return ret; 2433 } 2434 2435 return 0; 2436 } 2437 2438 static int btrfs_read_roots(struct btrfs_fs_info *fs_info) 2439 { 2440 struct btrfs_root *tree_root = fs_info->tree_root; 2441 struct btrfs_root *root; 2442 struct btrfs_key location; 2443 int ret; 2444 2445 BUG_ON(!fs_info->tree_root); 2446 2447 location.objectid = BTRFS_EXTENT_TREE_OBJECTID; 2448 location.type = BTRFS_ROOT_ITEM_KEY; 2449 location.offset = 0; 2450 2451 root = btrfs_read_tree_root(tree_root, &location); 2452 if (IS_ERR(root)) { 2453 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { 2454 ret = PTR_ERR(root); 2455 goto out; 2456 } 2457 } else { 2458 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2459 fs_info->extent_root = root; 2460 } 2461 2462 location.objectid = BTRFS_DEV_TREE_OBJECTID; 2463 root = btrfs_read_tree_root(tree_root, &location); 2464 if (IS_ERR(root)) { 2465 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { 2466 ret = PTR_ERR(root); 2467 goto out; 2468 } 2469 } else { 2470 
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2471 fs_info->dev_root = root; 2472 } 2473 /* Initialize fs_info for all devices in any case */ 2474 btrfs_init_devices_late(fs_info); 2475 2476 /* If IGNOREDATACSUMS is set don't bother reading the csum root. */ 2477 if (!btrfs_test_opt(fs_info, IGNOREDATACSUMS)) { 2478 location.objectid = BTRFS_CSUM_TREE_OBJECTID; 2479 root = btrfs_read_tree_root(tree_root, &location); 2480 if (IS_ERR(root)) { 2481 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { 2482 ret = PTR_ERR(root); 2483 goto out; 2484 } 2485 } else { 2486 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2487 fs_info->csum_root = root; 2488 } 2489 } 2490 2491 /* 2492 * This tree can share blocks with some other fs tree during relocation 2493 * and we need a proper setup by btrfs_get_fs_root 2494 */ 2495 root = btrfs_get_fs_root(tree_root->fs_info, 2496 BTRFS_DATA_RELOC_TREE_OBJECTID, true); 2497 if (IS_ERR(root)) { 2498 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { 2499 ret = PTR_ERR(root); 2500 goto out; 2501 } 2502 } else { 2503 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2504 fs_info->data_reloc_root = root; 2505 } 2506 2507 location.objectid = BTRFS_QUOTA_TREE_OBJECTID; 2508 root = btrfs_read_tree_root(tree_root, &location); 2509 if (!IS_ERR(root)) { 2510 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2511 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 2512 fs_info->quota_root = root; 2513 } 2514 2515 location.objectid = BTRFS_UUID_TREE_OBJECTID; 2516 root = btrfs_read_tree_root(tree_root, &location); 2517 if (IS_ERR(root)) { 2518 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { 2519 ret = PTR_ERR(root); 2520 if (ret != -ENOENT) 2521 goto out; 2522 } 2523 } else { 2524 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2525 fs_info->uuid_root = root; 2526 } 2527 2528 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { 2529 location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID; 2530 root = btrfs_read_tree_root(tree_root, &location); 2531 if (IS_ERR(root)) { 2532 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { 2533 ret = PTR_ERR(root); 2534 goto out; 2535 } 2536 } else { 2537 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2538 fs_info->free_space_root = root; 2539 } 2540 } 2541 2542 return 0; 2543 out: 2544 btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d", 2545 location.objectid, ret); 2546 return ret; 2547 } 2548 2549 /* 2550 * Real super block validation 2551 * NOTE: super csum type and incompat features will not be checked here. 
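 * (The csum was already verified via btrfs_check_super_csum() and the
 * incompat feature mask is checked separately in open_ctree().)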
 *
 * @sb:		super block to check
 * @mirror_num:	the super block number whose bytenr is checked:
 *		0	the primary (1st) sb
 *		1, 2	2nd and 3rd backup copy
 *		-1	skip bytenr check
 */
static int validate_super(struct btrfs_fs_info *fs_info,
			  struct btrfs_super_block *sb, int mirror_num)
{
	u64 nodesize = btrfs_super_nodesize(sb);
	u64 sectorsize = btrfs_super_sectorsize(sb);
	int ret = 0;

	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
		btrfs_err(fs_info, "no valid FS found");
		ret = -EINVAL;
	}
	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
			  btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
		ret = -EINVAL;
	}
	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
			  btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
			  btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "log_root level too big: %d >= %d",
			  btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}

	/*
	 * Check sectorsize and nodesize first, other checks will need them.
	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
	 */
	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
		ret = -EINVAL;
	}

	/*
	 * For 4K page size, we only support 4K sector size.
	 * For 64K page size, we support 64K and 4K sector sizes.
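	 *
	 * E.g. a filesystem created with "mkfs.btrfs -s 4k" is therefore
	 * mountable on a 64K page size machine (via the subpage support),
	 * while 16K and 32K sector sizes are rejected there.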
2604 */ 2605 if ((PAGE_SIZE == SZ_4K && sectorsize != PAGE_SIZE) || 2606 (PAGE_SIZE == SZ_64K && (sectorsize != SZ_4K && 2607 sectorsize != SZ_64K))) { 2608 btrfs_err(fs_info, 2609 "sectorsize %llu not yet supported for page size %lu", 2610 sectorsize, PAGE_SIZE); 2611 ret = -EINVAL; 2612 } 2613 2614 if (!is_power_of_2(nodesize) || nodesize < sectorsize || 2615 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) { 2616 btrfs_err(fs_info, "invalid nodesize %llu", nodesize); 2617 ret = -EINVAL; 2618 } 2619 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) { 2620 btrfs_err(fs_info, "invalid leafsize %u, should be %llu", 2621 le32_to_cpu(sb->__unused_leafsize), nodesize); 2622 ret = -EINVAL; 2623 } 2624 2625 /* Root alignment check */ 2626 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) { 2627 btrfs_warn(fs_info, "tree_root block unaligned: %llu", 2628 btrfs_super_root(sb)); 2629 ret = -EINVAL; 2630 } 2631 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) { 2632 btrfs_warn(fs_info, "chunk_root block unaligned: %llu", 2633 btrfs_super_chunk_root(sb)); 2634 ret = -EINVAL; 2635 } 2636 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) { 2637 btrfs_warn(fs_info, "log_root block unaligned: %llu", 2638 btrfs_super_log_root(sb)); 2639 ret = -EINVAL; 2640 } 2641 2642 if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid, 2643 BTRFS_FSID_SIZE)) { 2644 btrfs_err(fs_info, 2645 "superblock fsid doesn't match fsid of fs_devices: %pU != %pU", 2646 fs_info->super_copy->fsid, fs_info->fs_devices->fsid); 2647 ret = -EINVAL; 2648 } 2649 2650 if (btrfs_fs_incompat(fs_info, METADATA_UUID) && 2651 memcmp(fs_info->fs_devices->metadata_uuid, 2652 fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) { 2653 btrfs_err(fs_info, 2654 "superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU", 2655 fs_info->super_copy->metadata_uuid, 2656 fs_info->fs_devices->metadata_uuid); 2657 ret = -EINVAL; 2658 } 2659 2660 if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid, 2661 BTRFS_FSID_SIZE) != 0) { 2662 btrfs_err(fs_info, 2663 "dev_item UUID does not match metadata fsid: %pU != %pU", 2664 fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid); 2665 ret = -EINVAL; 2666 } 2667 2668 /* 2669 * Hint to catch really bogus numbers, bitflips or so, more exact checks are 2670 * done later 2671 */ 2672 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) { 2673 btrfs_err(fs_info, "bytes_used is too small %llu", 2674 btrfs_super_bytes_used(sb)); 2675 ret = -EINVAL; 2676 } 2677 if (!is_power_of_2(btrfs_super_stripesize(sb))) { 2678 btrfs_err(fs_info, "invalid stripesize %u", 2679 btrfs_super_stripesize(sb)); 2680 ret = -EINVAL; 2681 } 2682 if (btrfs_super_num_devices(sb) > (1UL << 31)) 2683 btrfs_warn(fs_info, "suspicious number of devices: %llu", 2684 btrfs_super_num_devices(sb)); 2685 if (btrfs_super_num_devices(sb) == 0) { 2686 btrfs_err(fs_info, "number of devices is 0"); 2687 ret = -EINVAL; 2688 } 2689 2690 if (mirror_num >= 0 && 2691 btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) { 2692 btrfs_err(fs_info, "super offset mismatch %llu != %u", 2693 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET); 2694 ret = -EINVAL; 2695 } 2696 2697 /* 2698 * Obvious sys_chunk_array corruptions, it must hold at least one key 2699 * and one chunk 2700 */ 2701 if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { 2702 btrfs_err(fs_info, "system chunk array too big %u > %u", 2703 btrfs_super_sys_array_size(sb), 2704 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE); 2705 ret = -EINVAL; 2706 
} 2707 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) 2708 + sizeof(struct btrfs_chunk)) { 2709 btrfs_err(fs_info, "system chunk array too small %u < %zu", 2710 btrfs_super_sys_array_size(sb), 2711 sizeof(struct btrfs_disk_key) 2712 + sizeof(struct btrfs_chunk)); 2713 ret = -EINVAL; 2714 } 2715 2716 /* 2717 * The generation is a global counter, we'll trust it more than the others 2718 * but it's still possible that it's the one that's wrong. 2719 */ 2720 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb)) 2721 btrfs_warn(fs_info, 2722 "suspicious: generation < chunk_root_generation: %llu < %llu", 2723 btrfs_super_generation(sb), 2724 btrfs_super_chunk_root_generation(sb)); 2725 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb) 2726 && btrfs_super_cache_generation(sb) != (u64)-1) 2727 btrfs_warn(fs_info, 2728 "suspicious: generation < cache_generation: %llu < %llu", 2729 btrfs_super_generation(sb), 2730 btrfs_super_cache_generation(sb)); 2731 2732 return ret; 2733 } 2734 2735 /* 2736 * Validation of super block at mount time. 2737 * Some checks already done early at mount time, like csum type and incompat 2738 * flags will be skipped. 2739 */ 2740 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info) 2741 { 2742 return validate_super(fs_info, fs_info->super_copy, 0); 2743 } 2744 2745 /* 2746 * Validation of super block at write time. 2747 * Some checks like bytenr check will be skipped as their values will be 2748 * overwritten soon. 2749 * Extra checks like csum type and incompat flags will be done here. 2750 */ 2751 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info, 2752 struct btrfs_super_block *sb) 2753 { 2754 int ret; 2755 2756 ret = validate_super(fs_info, sb, -1); 2757 if (ret < 0) 2758 goto out; 2759 if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) { 2760 ret = -EUCLEAN; 2761 btrfs_err(fs_info, "invalid csum type, has %u want %u", 2762 btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32); 2763 goto out; 2764 } 2765 if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) { 2766 ret = -EUCLEAN; 2767 btrfs_err(fs_info, 2768 "invalid incompat flags, has 0x%llx valid mask 0x%llx", 2769 btrfs_super_incompat_flags(sb), 2770 (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP); 2771 goto out; 2772 } 2773 out: 2774 if (ret < 0) 2775 btrfs_err(fs_info, 2776 "super block corruption detected before writing it to disk"); 2777 return ret; 2778 } 2779 2780 static int __cold init_tree_roots(struct btrfs_fs_info *fs_info) 2781 { 2782 int backup_index = find_newest_super_backup(fs_info); 2783 struct btrfs_super_block *sb = fs_info->super_copy; 2784 struct btrfs_root *tree_root = fs_info->tree_root; 2785 bool handle_error = false; 2786 int ret = 0; 2787 int i; 2788 2789 for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { 2790 u64 generation; 2791 int level; 2792 2793 if (handle_error) { 2794 if (!IS_ERR(tree_root->node)) 2795 free_extent_buffer(tree_root->node); 2796 tree_root->node = NULL; 2797 2798 if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) 2799 break; 2800 2801 free_root_pointers(fs_info, 0); 2802 2803 /* 2804 * Don't use the log in recovery mode, it won't be 2805 * valid 2806 */ 2807 btrfs_set_super_log_root(sb, 0); 2808 2809 /* We can't trust the free space cache either */ 2810 btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); 2811 2812 ret = read_backup_root(fs_info, i); 2813 backup_index = ret; 2814 if (ret < 0) 2815 return ret; 2816 } 2817 generation = btrfs_super_generation(sb); 2818 level = 
btrfs_super_root_level(sb); 2819 tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb), 2820 BTRFS_ROOT_TREE_OBJECTID, 2821 generation, level, NULL); 2822 if (IS_ERR(tree_root->node)) { 2823 handle_error = true; 2824 ret = PTR_ERR(tree_root->node); 2825 tree_root->node = NULL; 2826 btrfs_warn(fs_info, "couldn't read tree root"); 2827 continue; 2828 2829 } else if (!extent_buffer_uptodate(tree_root->node)) { 2830 handle_error = true; 2831 ret = -EIO; 2832 btrfs_warn(fs_info, "error while reading tree root"); 2833 continue; 2834 } 2835 2836 btrfs_set_root_node(&tree_root->root_item, tree_root->node); 2837 tree_root->commit_root = btrfs_root_node(tree_root); 2838 btrfs_set_root_refs(&tree_root->root_item, 1); 2839 2840 /* 2841 * No need to hold btrfs_root::objectid_mutex since the fs 2842 * hasn't been fully initialised and we are the only user 2843 */ 2844 ret = btrfs_init_root_free_objectid(tree_root); 2845 if (ret < 0) { 2846 handle_error = true; 2847 continue; 2848 } 2849 2850 ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID); 2851 2852 ret = btrfs_read_roots(fs_info); 2853 if (ret < 0) { 2854 handle_error = true; 2855 continue; 2856 } 2857 2858 /* All successful */ 2859 fs_info->generation = generation; 2860 fs_info->last_trans_committed = generation; 2861 2862 /* Always begin writing backup roots after the one being used */ 2863 if (backup_index < 0) { 2864 fs_info->backup_root_index = 0; 2865 } else { 2866 fs_info->backup_root_index = backup_index + 1; 2867 fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS; 2868 } 2869 break; 2870 } 2871 2872 return ret; 2873 } 2874 2875 void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) 2876 { 2877 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); 2878 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); 2879 INIT_LIST_HEAD(&fs_info->trans_list); 2880 INIT_LIST_HEAD(&fs_info->dead_roots); 2881 INIT_LIST_HEAD(&fs_info->delayed_iputs); 2882 INIT_LIST_HEAD(&fs_info->delalloc_roots); 2883 INIT_LIST_HEAD(&fs_info->caching_block_groups); 2884 spin_lock_init(&fs_info->delalloc_root_lock); 2885 spin_lock_init(&fs_info->trans_lock); 2886 spin_lock_init(&fs_info->fs_roots_radix_lock); 2887 spin_lock_init(&fs_info->delayed_iput_lock); 2888 spin_lock_init(&fs_info->defrag_inodes_lock); 2889 spin_lock_init(&fs_info->super_lock); 2890 spin_lock_init(&fs_info->buffer_lock); 2891 spin_lock_init(&fs_info->unused_bgs_lock); 2892 spin_lock_init(&fs_info->treelog_bg_lock); 2893 spin_lock_init(&fs_info->zone_active_bgs_lock); 2894 spin_lock_init(&fs_info->relocation_bg_lock); 2895 rwlock_init(&fs_info->tree_mod_log_lock); 2896 mutex_init(&fs_info->unused_bg_unpin_mutex); 2897 mutex_init(&fs_info->reclaim_bgs_lock); 2898 mutex_init(&fs_info->reloc_mutex); 2899 mutex_init(&fs_info->delalloc_root_mutex); 2900 mutex_init(&fs_info->zoned_meta_io_lock); 2901 seqlock_init(&fs_info->profiles_lock); 2902 2903 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); 2904 INIT_LIST_HEAD(&fs_info->space_info); 2905 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); 2906 INIT_LIST_HEAD(&fs_info->unused_bgs); 2907 INIT_LIST_HEAD(&fs_info->reclaim_bgs); 2908 INIT_LIST_HEAD(&fs_info->zone_active_bgs); 2909 #ifdef CONFIG_BTRFS_DEBUG 2910 INIT_LIST_HEAD(&fs_info->allocated_roots); 2911 INIT_LIST_HEAD(&fs_info->allocated_ebs); 2912 spin_lock_init(&fs_info->eb_leak_lock); 2913 #endif 2914 extent_map_tree_init(&fs_info->mapping_tree); 2915 btrfs_init_block_rsv(&fs_info->global_block_rsv, 2916 BTRFS_BLOCK_RSV_GLOBAL); 2917 btrfs_init_block_rsv(&fs_info->trans_block_rsv, 
BTRFS_BLOCK_RSV_TRANS); 2918 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); 2919 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); 2920 btrfs_init_block_rsv(&fs_info->delayed_block_rsv, 2921 BTRFS_BLOCK_RSV_DELOPS); 2922 btrfs_init_block_rsv(&fs_info->delayed_refs_rsv, 2923 BTRFS_BLOCK_RSV_DELREFS); 2924 2925 atomic_set(&fs_info->async_delalloc_pages, 0); 2926 atomic_set(&fs_info->defrag_running, 0); 2927 atomic_set(&fs_info->reada_works_cnt, 0); 2928 atomic_set(&fs_info->nr_delayed_iputs, 0); 2929 atomic64_set(&fs_info->tree_mod_seq, 0); 2930 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; 2931 fs_info->metadata_ratio = 0; 2932 fs_info->defrag_inodes = RB_ROOT; 2933 atomic64_set(&fs_info->free_chunk_space, 0); 2934 fs_info->tree_mod_log = RB_ROOT; 2935 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; 2936 fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */ 2937 /* readahead state */ 2938 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 2939 spin_lock_init(&fs_info->reada_lock); 2940 btrfs_init_ref_verify(fs_info); 2941 2942 fs_info->thread_pool_size = min_t(unsigned long, 2943 num_online_cpus() + 2, 8); 2944 2945 INIT_LIST_HEAD(&fs_info->ordered_roots); 2946 spin_lock_init(&fs_info->ordered_root_lock); 2947 2948 btrfs_init_scrub(fs_info); 2949 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 2950 fs_info->check_integrity_print_mask = 0; 2951 #endif 2952 btrfs_init_balance(fs_info); 2953 btrfs_init_async_reclaim_work(fs_info); 2954 2955 spin_lock_init(&fs_info->block_group_cache_lock); 2956 fs_info->block_group_cache_tree = RB_ROOT; 2957 fs_info->first_logical_byte = (u64)-1; 2958 2959 extent_io_tree_init(fs_info, &fs_info->excluded_extents, 2960 IO_TREE_FS_EXCLUDED_EXTENTS, NULL); 2961 set_bit(BTRFS_FS_BARRIER, &fs_info->flags); 2962 2963 mutex_init(&fs_info->ordered_operations_mutex); 2964 mutex_init(&fs_info->tree_log_mutex); 2965 mutex_init(&fs_info->chunk_mutex); 2966 mutex_init(&fs_info->transaction_kthread_mutex); 2967 mutex_init(&fs_info->cleaner_mutex); 2968 mutex_init(&fs_info->ro_block_group_mutex); 2969 init_rwsem(&fs_info->commit_root_sem); 2970 init_rwsem(&fs_info->cleanup_work_sem); 2971 init_rwsem(&fs_info->subvol_sem); 2972 sema_init(&fs_info->uuid_tree_rescan_sem, 1); 2973 2974 btrfs_init_dev_replace_locks(fs_info); 2975 btrfs_init_qgroup(fs_info); 2976 btrfs_discard_init(fs_info); 2977 2978 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); 2979 btrfs_init_free_cluster(&fs_info->data_alloc_cluster); 2980 2981 init_waitqueue_head(&fs_info->transaction_throttle); 2982 init_waitqueue_head(&fs_info->transaction_wait); 2983 init_waitqueue_head(&fs_info->transaction_blocked_wait); 2984 init_waitqueue_head(&fs_info->async_submit_wait); 2985 init_waitqueue_head(&fs_info->delayed_iputs_wait); 2986 2987 /* Usable values until the real ones are cached from the superblock */ 2988 fs_info->nodesize = 4096; 2989 fs_info->sectorsize = 4096; 2990 fs_info->sectorsize_bits = ilog2(4096); 2991 fs_info->stripesize = 4096; 2992 2993 spin_lock_init(&fs_info->swapfile_pins_lock); 2994 fs_info->swapfile_pins = RB_ROOT; 2995 2996 spin_lock_init(&fs_info->send_reloc_lock); 2997 fs_info->send_in_progress = 0; 2998 2999 fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH; 3000 INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work); 3001 } 3002 3003 static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb) 3004 { 3005 int ret; 3006 3007 fs_info->sb = sb; 3008 
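	/*
	 * Start with the fixed superblock blocksize here; once the on-disk
	 * super block has been read, open_ctree() switches s_blocksize to
	 * the filesystem's actual sectorsize.
	 */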
sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; 3009 sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); 3010 3011 ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL); 3012 if (ret) 3013 return ret; 3014 3015 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); 3016 if (ret) 3017 return ret; 3018 3019 fs_info->dirty_metadata_batch = PAGE_SIZE * 3020 (1 + ilog2(nr_cpu_ids)); 3021 3022 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); 3023 if (ret) 3024 return ret; 3025 3026 ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0, 3027 GFP_KERNEL); 3028 if (ret) 3029 return ret; 3030 3031 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), 3032 GFP_KERNEL); 3033 if (!fs_info->delayed_root) 3034 return -ENOMEM; 3035 btrfs_init_delayed_root(fs_info->delayed_root); 3036 3037 if (sb_rdonly(sb)) 3038 set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state); 3039 3040 return btrfs_alloc_stripe_hash_table(fs_info); 3041 } 3042 3043 static int btrfs_uuid_rescan_kthread(void *data) 3044 { 3045 struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data; 3046 int ret; 3047 3048 /* 3049 * 1st step is to iterate through the existing UUID tree and 3050 * to delete all entries that contain outdated data. 3051 * 2nd step is to add all missing entries to the UUID tree. 3052 */ 3053 ret = btrfs_uuid_tree_iterate(fs_info); 3054 if (ret < 0) { 3055 if (ret != -EINTR) 3056 btrfs_warn(fs_info, "iterating uuid_tree failed %d", 3057 ret); 3058 up(&fs_info->uuid_tree_rescan_sem); 3059 return ret; 3060 } 3061 return btrfs_uuid_scan_kthread(data); 3062 } 3063 3064 static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info) 3065 { 3066 struct task_struct *task; 3067 3068 down(&fs_info->uuid_tree_rescan_sem); 3069 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid"); 3070 if (IS_ERR(task)) { 3071 /* fs_info->update_uuid_tree_gen remains 0 in all error case */ 3072 btrfs_warn(fs_info, "failed to start uuid_rescan task"); 3073 up(&fs_info->uuid_tree_rescan_sem); 3074 return PTR_ERR(task); 3075 } 3076 3077 return 0; 3078 } 3079 3080 /* 3081 * Some options only have meaning at mount time and shouldn't persist across 3082 * remounts, or be displayed. Clear these at the end of mount and remount 3083 * code paths. 3084 */ 3085 void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info) 3086 { 3087 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT); 3088 btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE); 3089 } 3090 3091 /* 3092 * Mounting logic specific to read-write file systems. Shared by open_ctree 3093 * and btrfs_remount when remounting from read-only to read-write. 
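 *
 * In the open_ctree() path this runs before BTRFS_FS_OPEN is set, i.e.
 * before the cleaner kthread starts doing any real work.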
 */
int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
{
	int ret;
	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
	bool clear_free_space_tree = false;

	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		clear_free_space_tree = true;
	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
		btrfs_warn(fs_info, "free space tree is invalid");
		clear_free_space_tree = true;
	}

	if (clear_free_space_tree) {
		btrfs_info(fs_info, "clearing free space tree");
		ret = btrfs_clear_free_space_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to clear free space tree: %d", ret);
			goto out;
		}
	}

	/*
	 * btrfs_find_orphan_roots() is responsible for finding all the dead
	 * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
	 * them into the fs_info->fs_roots_radix tree. This must be done before
	 * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
	 * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
	 * item before the root's tree is deleted - this means that if we unmount
	 * or crash before the deletion completes, on the next mount we will not
	 * delete what remains of the tree because the orphan item does not
	 * exist anymore, which is what tells us we have a pending deletion.
	 */
	ret = btrfs_find_orphan_roots(fs_info);
	if (ret)
		goto out;

	ret = btrfs_cleanup_fs_roots(fs_info);
	if (ret)
		goto out;

	down_read(&fs_info->cleanup_work_sem);
	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
		up_read(&fs_info->cleanup_work_sem);
		goto out;
	}
	up_read(&fs_info->cleanup_work_sem);

	mutex_lock(&fs_info->cleaner_mutex);
	ret = btrfs_recover_relocation(fs_info->tree_root);
	mutex_unlock(&fs_info->cleaner_mutex);
	if (ret < 0) {
		btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
		goto out;
	}

	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		btrfs_info(fs_info, "creating free space tree");
		ret = btrfs_create_free_space_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to create free space tree: %d", ret);
			goto out;
		}
	}

	if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) {
		ret = btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
		if (ret)
			goto out;
	}

	ret = btrfs_resume_balance_async(fs_info);
	if (ret)
		goto out;

	ret = btrfs_resume_dev_replace_async(fs_info);
	if (ret) {
		btrfs_warn(fs_info, "failed to resume dev_replace");
		goto out;
	}

	btrfs_qgroup_rescan_resume(fs_info);

	if (!fs_info->uuid_root) {
		btrfs_info(fs_info, "creating UUID tree");
		ret = btrfs_create_uuid_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to create the UUID tree %d", ret);
			goto out;
		}
	}

out:
	return ret;
}

int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
		      char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 stripesize;
	u64 generation;
	u64 features;
	u16 csum_type;
	struct btrfs_super_block
*disk_super; 3208 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 3209 struct btrfs_root *tree_root; 3210 struct btrfs_root *chunk_root; 3211 int ret; 3212 int err = -EINVAL; 3213 int level; 3214 3215 ret = init_mount_fs_info(fs_info, sb); 3216 if (ret) { 3217 err = ret; 3218 goto fail; 3219 } 3220 3221 /* These need to be init'ed before we start creating inodes and such. */ 3222 tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, 3223 GFP_KERNEL); 3224 fs_info->tree_root = tree_root; 3225 chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID, 3226 GFP_KERNEL); 3227 fs_info->chunk_root = chunk_root; 3228 if (!tree_root || !chunk_root) { 3229 err = -ENOMEM; 3230 goto fail; 3231 } 3232 3233 fs_info->btree_inode = new_inode(sb); 3234 if (!fs_info->btree_inode) { 3235 err = -ENOMEM; 3236 goto fail; 3237 } 3238 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); 3239 btrfs_init_btree_inode(fs_info); 3240 3241 invalidate_bdev(fs_devices->latest_dev->bdev); 3242 3243 /* 3244 * Read super block and check the signature bytes only 3245 */ 3246 disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev); 3247 if (IS_ERR(disk_super)) { 3248 err = PTR_ERR(disk_super); 3249 goto fail_alloc; 3250 } 3251 3252 /* 3253 * Verify the type first, if that or the checksum value are 3254 * corrupted, we'll find out 3255 */ 3256 csum_type = btrfs_super_csum_type(disk_super); 3257 if (!btrfs_supported_super_csum(csum_type)) { 3258 btrfs_err(fs_info, "unsupported checksum algorithm: %u", 3259 csum_type); 3260 err = -EINVAL; 3261 btrfs_release_disk_super(disk_super); 3262 goto fail_alloc; 3263 } 3264 3265 fs_info->csum_size = btrfs_super_csum_size(disk_super); 3266 3267 ret = btrfs_init_csum_hash(fs_info, csum_type); 3268 if (ret) { 3269 err = ret; 3270 btrfs_release_disk_super(disk_super); 3271 goto fail_alloc; 3272 } 3273 3274 /* 3275 * We want to check superblock checksum, the type is stored inside. 3276 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). 3277 */ 3278 if (btrfs_check_super_csum(fs_info, (u8 *)disk_super)) { 3279 btrfs_err(fs_info, "superblock checksum mismatch"); 3280 err = -EINVAL; 3281 btrfs_release_disk_super(disk_super); 3282 goto fail_alloc; 3283 } 3284 3285 /* 3286 * super_copy is zeroed at allocation time and we never touch the 3287 * following bytes up to INFO_SIZE, the checksum is calculated from 3288 * the whole block of INFO_SIZE 3289 */ 3290 memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy)); 3291 btrfs_release_disk_super(disk_super); 3292 3293 disk_super = fs_info->super_copy; 3294 3295 3296 features = btrfs_super_flags(disk_super); 3297 if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) { 3298 features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2; 3299 btrfs_set_super_flags(disk_super, features); 3300 btrfs_info(fs_info, 3301 "found metadata UUID change in progress flag, clearing"); 3302 } 3303 3304 memcpy(fs_info->super_for_commit, fs_info->super_copy, 3305 sizeof(*fs_info->super_for_commit)); 3306 3307 ret = btrfs_validate_mount_super(fs_info); 3308 if (ret) { 3309 btrfs_err(fs_info, "superblock contains fatal errors"); 3310 err = -EINVAL; 3311 goto fail_alloc; 3312 } 3313 3314 if (!btrfs_super_root(disk_super)) 3315 goto fail_alloc; 3316 3317 /* check FS state, whether FS is broken. 
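	 * (i.e. the superblock was left with BTRFS_SUPER_FLAG_ERROR set by
	 * a previous mount)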
	 */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);

	/*
	 * In the long term, we'll store the compression type in the super
	 * block, and it'll be used for per file compression control.
	 */
	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;

	/*
	 * Flag our filesystem as having big metadata blocks if they are bigger
	 * than the page size.
	 */
	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
			btrfs_info(fs_info,
				   "flagging fs with big metadata feature");
		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
	}

	/* Set up fs_info before parsing mount options */
	nodesize = btrfs_super_nodesize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = sectorsize;
	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));

	fs_info->nodesize = nodesize;
	fs_info->sectorsize = sectorsize;
	fs_info->sectorsize_bits = ilog2(sectorsize);
	fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
	fs_info->stripesize = stripesize;

	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super) &
		~BTRFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		btrfs_err(fs_info,
			  "cannot mount because of unsupported optional features (%llx)",
			  features);
		err = -EINVAL;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super);
	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;

	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
		btrfs_info(fs_info, "has skinny extents");

	/*
	 * Mixed block groups end up with duplicate but slightly offset
	 * extent buffers for the same range. This leads to corruption.
	 */
	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
	    (sectorsize != nodesize)) {
		btrfs_err(fs_info,
"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
			  nodesize, sectorsize);
		goto fail_alloc;
	}

	/*
	 * We don't need to take the lock here because there is no other task
	 * that can update the flag.
3392 */ 3393 btrfs_set_super_incompat_flags(disk_super, features); 3394 3395 features = btrfs_super_compat_ro_flags(disk_super) & 3396 ~BTRFS_FEATURE_COMPAT_RO_SUPP; 3397 if (!sb_rdonly(sb) && features) { 3398 btrfs_err(fs_info, 3399 "cannot mount read-write because of unsupported optional features (%llx)", 3400 features); 3401 err = -EINVAL; 3402 goto fail_alloc; 3403 } 3404 3405 if (sectorsize < PAGE_SIZE) { 3406 struct btrfs_subpage_info *subpage_info; 3407 3408 btrfs_warn(fs_info, 3409 "read-write for sector size %u with page size %lu is experimental", 3410 sectorsize, PAGE_SIZE); 3411 if (btrfs_super_incompat_flags(fs_info->super_copy) & 3412 BTRFS_FEATURE_INCOMPAT_RAID56) { 3413 btrfs_err(fs_info, 3414 "RAID56 is not yet supported for sector size %u with page size %lu", 3415 sectorsize, PAGE_SIZE); 3416 err = -EINVAL; 3417 goto fail_alloc; 3418 } 3419 subpage_info = kzalloc(sizeof(*subpage_info), GFP_KERNEL); 3420 if (!subpage_info) 3421 goto fail_alloc; 3422 btrfs_init_subpage_info(subpage_info, sectorsize); 3423 fs_info->subpage_info = subpage_info; 3424 } 3425 3426 ret = btrfs_init_workqueues(fs_info, fs_devices); 3427 if (ret) { 3428 err = ret; 3429 goto fail_sb_buffer; 3430 } 3431 3432 sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); 3433 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); 3434 3435 sb->s_blocksize = sectorsize; 3436 sb->s_blocksize_bits = blksize_bits(sectorsize); 3437 memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE); 3438 3439 mutex_lock(&fs_info->chunk_mutex); 3440 ret = btrfs_read_sys_array(fs_info); 3441 mutex_unlock(&fs_info->chunk_mutex); 3442 if (ret) { 3443 btrfs_err(fs_info, "failed to read the system array: %d", ret); 3444 goto fail_sb_buffer; 3445 } 3446 3447 generation = btrfs_super_chunk_root_generation(disk_super); 3448 level = btrfs_super_chunk_root_level(disk_super); 3449 3450 chunk_root->node = read_tree_block(fs_info, 3451 btrfs_super_chunk_root(disk_super), 3452 BTRFS_CHUNK_TREE_OBJECTID, 3453 generation, level, NULL); 3454 if (IS_ERR(chunk_root->node) || 3455 !extent_buffer_uptodate(chunk_root->node)) { 3456 btrfs_err(fs_info, "failed to read chunk root"); 3457 if (!IS_ERR(chunk_root->node)) 3458 free_extent_buffer(chunk_root->node); 3459 chunk_root->node = NULL; 3460 goto fail_tree_roots; 3461 } 3462 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); 3463 chunk_root->commit_root = btrfs_root_node(chunk_root); 3464 3465 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, 3466 offsetof(struct btrfs_header, chunk_tree_uuid), 3467 BTRFS_UUID_SIZE); 3468 3469 ret = btrfs_read_chunk_tree(fs_info); 3470 if (ret) { 3471 btrfs_err(fs_info, "failed to read chunk tree: %d", ret); 3472 goto fail_tree_roots; 3473 } 3474 3475 /* 3476 * At this point we know all the devices that make this filesystem, 3477 * including the seed devices but we don't know yet if the replace 3478 * target is required. So free devices that are not part of this 3479 * filesystem but skip the replace target device which is checked 3480 * below in btrfs_init_dev_replace(). 3481 */ 3482 btrfs_free_extra_devids(fs_devices); 3483 if (!fs_devices->latest_dev->bdev) { 3484 btrfs_err(fs_info, "failed to read devices"); 3485 goto fail_tree_roots; 3486 } 3487 3488 ret = init_tree_roots(fs_info); 3489 if (ret) 3490 goto fail_tree_roots; 3491 3492 /* 3493 * Get zone type information of zoned block devices. 
This will also 3494 * handle emulation of a zoned filesystem if a regular device has the 3495 * zoned incompat feature flag set. 3496 */ 3497 ret = btrfs_get_dev_zone_info_all_devices(fs_info); 3498 if (ret) { 3499 btrfs_err(fs_info, 3500 "zoned: failed to read device zone info: %d", 3501 ret); 3502 goto fail_block_groups; 3503 } 3504 3505 /* 3506 * If we have a uuid root and we're not being told to rescan we need to 3507 * check the generation here so we can set the 3508 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the 3509 * transaction during a balance or the log replay without updating the 3510 * uuid generation, and then if we crash we would rescan the uuid tree, 3511 * even though it was perfectly fine. 3512 */ 3513 if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) && 3514 fs_info->generation == btrfs_super_uuid_tree_generation(disk_super)) 3515 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 3516 3517 ret = btrfs_verify_dev_extents(fs_info); 3518 if (ret) { 3519 btrfs_err(fs_info, 3520 "failed to verify dev extents against chunks: %d", 3521 ret); 3522 goto fail_block_groups; 3523 } 3524 ret = btrfs_recover_balance(fs_info); 3525 if (ret) { 3526 btrfs_err(fs_info, "failed to recover balance: %d", ret); 3527 goto fail_block_groups; 3528 } 3529 3530 ret = btrfs_init_dev_stats(fs_info); 3531 if (ret) { 3532 btrfs_err(fs_info, "failed to init dev_stats: %d", ret); 3533 goto fail_block_groups; 3534 } 3535 3536 ret = btrfs_init_dev_replace(fs_info); 3537 if (ret) { 3538 btrfs_err(fs_info, "failed to init dev_replace: %d", ret); 3539 goto fail_block_groups; 3540 } 3541 3542 ret = btrfs_check_zoned_mode(fs_info); 3543 if (ret) { 3544 btrfs_err(fs_info, "failed to initialize zoned mode: %d", 3545 ret); 3546 goto fail_block_groups; 3547 } 3548 3549 ret = btrfs_sysfs_add_fsid(fs_devices); 3550 if (ret) { 3551 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d", 3552 ret); 3553 goto fail_block_groups; 3554 } 3555 3556 ret = btrfs_sysfs_add_mounted(fs_info); 3557 if (ret) { 3558 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret); 3559 goto fail_fsdev_sysfs; 3560 } 3561 3562 ret = btrfs_init_space_info(fs_info); 3563 if (ret) { 3564 btrfs_err(fs_info, "failed to initialize space info: %d", ret); 3565 goto fail_sysfs; 3566 } 3567 3568 ret = btrfs_read_block_groups(fs_info); 3569 if (ret) { 3570 btrfs_err(fs_info, "failed to read block groups: %d", ret); 3571 goto fail_sysfs; 3572 } 3573 3574 if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices && 3575 !btrfs_check_rw_degradable(fs_info, NULL)) { 3576 btrfs_warn(fs_info, 3577 "writable mount is not allowed due to too many missing devices"); 3578 goto fail_sysfs; 3579 } 3580 3581 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, 3582 "btrfs-cleaner"); 3583 if (IS_ERR(fs_info->cleaner_kthread)) 3584 goto fail_sysfs; 3585 3586 fs_info->transaction_kthread = kthread_run(transaction_kthread, 3587 tree_root, 3588 "btrfs-transaction"); 3589 if (IS_ERR(fs_info->transaction_kthread)) 3590 goto fail_cleaner; 3591 3592 if (!btrfs_test_opt(fs_info, NOSSD) && 3593 !fs_info->fs_devices->rotating) { 3594 btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations"); 3595 } 3596 3597 /* 3598 * Mount does not set all options immediately, we can do it now and do 3599 * not have to wait for transaction commit 3600 */ 3601 btrfs_apply_pending_changes(fs_info); 3602 3603 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 3604 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) { 3605 ret = 
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
		ret = btrfsic_mount(fs_info, fs_devices,
				    btrfs_test_opt(fs_info,
					CHECK_INTEGRITY_DATA) ? 1 : 0,
				    fs_info->check_integrity_print_mask);
		if (ret)
			btrfs_warn(fs_info,
				"failed to initialize integrity check module: %d",
				ret);
	}
#endif
	ret = btrfs_read_qgroup_config(fs_info);
	if (ret)
		goto fail_trans_kthread;

	if (btrfs_build_ref_tree(fs_info))
		btrfs_err(fs_info, "couldn't build ref tree");

	/* Do not make disk changes in a broken FS or when nologreplay is given */
	if (btrfs_super_log_root(disk_super) != 0 &&
	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
		btrfs_info(fs_info, "start tree-log replay");
		ret = btrfs_replay_log(fs_info, fs_devices);
		if (ret) {
			err = ret;
			goto fail_qgroup;
		}
	}

	fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
	if (IS_ERR(fs_info->fs_root)) {
		err = PTR_ERR(fs_info->fs_root);
		btrfs_warn(fs_info, "failed to read fs tree: %d", err);
		fs_info->fs_root = NULL;
		goto fail_qgroup;
	}

	if (sb_rdonly(sb))
		goto clear_oneshot;

	ret = btrfs_start_pre_rw_mount(fs_info);
	if (ret) {
		close_ctree(fs_info);
		return ret;
	}
	btrfs_discard_resume(fs_info);

	if (fs_info->uuid_root &&
	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
		btrfs_info(fs_info, "checking UUID tree");
		ret = btrfs_check_uuid_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				"failed to check the UUID tree: %d", ret);
			close_ctree(fs_info);
			return ret;
		}
	}

	set_bit(BTRFS_FS_OPEN, &fs_info->flags);

clear_oneshot:
	btrfs_clear_oneshot_options(fs_info);
	return 0;

fail_qgroup:
	btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
	kthread_stop(fs_info->transaction_kthread);
	btrfs_cleanup_transaction(fs_info);
	btrfs_free_fs_roots(fs_info);
fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);

	/*
	 * Make sure we're done with the btree inode before we stop our
	 * kthreads.
	 */
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

fail_sysfs:
	btrfs_sysfs_remove_mounted(fs_info);

fail_fsdev_sysfs:
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

fail_block_groups:
	btrfs_put_block_group_cache(fs_info);

fail_tree_roots:
	if (fs_info->data_reloc_root)
		btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
	free_root_pointers(fs_info, true);
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_sb_buffer:
	btrfs_stop_all_workers(fs_info);
	btrfs_free_block_groups(fs_info);
fail_alloc:
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	iput(fs_info->btree_inode);
fail:
	btrfs_close_devices(fs_info->fs_devices);
	return err;
}
ALLOW_ERROR_INJECTION(open_ctree, ERRNO);

static void btrfs_end_super_write(struct bio *bio)
{
	struct btrfs_device *device = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;
	struct page *page;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;

		if (bio->bi_status) {
			btrfs_warn_rl_in_rcu(device->fs_info,
				"lost page write due to IO error on %s (%d)",
				rcu_str_deref(device->name),
				blk_status_to_errno(bio->bi_status));
			ClearPageUptodate(page);
			SetPageError(page);
			btrfs_dev_stat_inc_and_print(device,
						     BTRFS_DEV_STAT_WRITE_ERRS);
		} else {
			SetPageUptodate(page);
		}

		put_page(page);
		unlock_page(page);
	}

	bio_put(bio);
}

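/*
 * Read one superblock copy (@copy_num) from @bdev via the page cache.
 *
 * On zoned block devices the actual on-disk location is translated by
 * btrfs_sb_log_location_bdev(). Only basic sanity checks (magic and the
 * expected bytenr) are performed here; full validation is left to the
 * caller. On success the caller holds a page reference on the returned
 * superblock and must drop it with btrfs_release_disk_super().
 */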
struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
						   int copy_num)
{
	struct btrfs_super_block *super;
	struct page *page;
	u64 bytenr, bytenr_orig;
	struct address_space *mapping = bdev->bd_inode->i_mapping;
	int ret;

	bytenr_orig = btrfs_sb_offset(copy_num);
	ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
	if (ret == -ENOENT)
		return ERR_PTR(-EINVAL);
	else if (ret)
		return ERR_PTR(ret);

	if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
	if (IS_ERR(page))
		return ERR_CAST(page);

	super = page_address(page);
	if (btrfs_super_magic(super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(super);
		return ERR_PTR(-ENODATA);
	}

	if (btrfs_super_bytenr(super) != bytenr_orig) {
		btrfs_release_disk_super(super);
		return ERR_PTR(-EINVAL);
	}

	return super;
}

struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
{
	struct btrfs_super_block *super, *latest = NULL;
	int i;
	u64 transid = 0;

	/*
	 * We would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead.
	 */
	for (i = 0; i < 1; i++) {
		super = btrfs_read_dev_one_super(bdev, i);
		if (IS_ERR(super))
			continue;

		if (!latest || btrfs_super_generation(super) > transid) {
			if (latest)
				btrfs_release_disk_super(super);

			latest = super;
			transid = btrfs_super_generation(super);
		}
	}

	return super;
}

/*
 * Write superblock @sb to the @device. Do not wait for completion, all the
 * pages we use for writing are locked.
 *
 * Write @max_mirrors copies of the superblock, where 0 means default that fits
 * the expected device size at commit time. Note that max_mirrors must be the
 * same for the write and wait phases.
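 *
 * The copies live at the fixed offsets returned by btrfs_sb_offset()
 * (64KiB, 64MiB, 256GiB); on zoned devices btrfs_sb_log_location()
 * translates these to the current write position inside the corresponding
 * pair of superblock log zones.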
 *
 * Return number of errors when page is not found or submission fails.
 */
static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb, int max_mirrors)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct address_space *mapping = device->bdev->bd_inode->i_mapping;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	int i;
	int errors = 0;
	int ret;
	u64 bytenr, bytenr_orig;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	shash->tfm = fs_info->csum_shash;

	for (i = 0; i < max_mirrors; i++) {
		struct page *page;
		struct bio *bio;
		struct btrfs_super_block *disk_super;

		bytenr_orig = btrfs_sb_offset(i);
		ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
		if (ret == -ENOENT) {
			continue;
		} else if (ret < 0) {
			btrfs_err(device->fs_info,
				"couldn't get super block location for mirror %d",
				i);
			errors++;
			continue;
		}
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    device->commit_total_bytes)
			break;

		btrfs_set_super_bytenr(sb, bytenr_orig);

		crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
				    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
				    sb->csum);

		page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT,
					   GFP_NOFS);
		if (!page) {
			btrfs_err(device->fs_info,
			    "couldn't get super block page for bytenr %llu",
			    bytenr);
			errors++;
			continue;
		}

		/* Bump the refcount for wait_dev_supers() */
		get_page(page);

		disk_super = page_address(page);
		memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);

		/*
		 * Directly use bios here instead of relying on the page cache
		 * to do I/O, so we don't lose the ability to do integrity
		 * checking.
		 */
		bio = bio_alloc(GFP_NOFS, 1);
		bio_set_dev(bio, device->bdev);
		bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
		bio->bi_private = device;
		bio->bi_end_io = btrfs_end_super_write;
		__bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE,
			       offset_in_page(bytenr));

		/*
		 * We FUA only the first super block. The others we allow to
		 * go down lazy and there's a short window where the on-disk
		 * copies might still contain the older version.
		 */
		bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO;
		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
			bio->bi_opf |= REQ_FUA;

		btrfsic_submit_bio(bio);

		if (btrfs_advance_sb_log(device, i))
			errors++;
	}
	return errors < i ? 0 : -1;
}

/*
 * Wait for write completion of superblocks done by write_dev_supers,
 * @max_mirrors same for write and wait phases.
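 *
 * The pages were submitted locked by write_dev_supers() with an extra
 * reference held for this wait phase; the write endio handler unlocks
 * them, so waiting on the page lock synchronizes with IO completion.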
 *
 * Return number of errors when page is not found or not marked up to
 * date.
 */
static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
{
	int i;
	int errors = 0;
	bool primary_failed = false;
	int ret;
	u64 bytenr;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		struct page *page;

		ret = btrfs_sb_log_location(device, i, READ, &bytenr);
		if (ret == -ENOENT) {
			break;
		} else if (ret < 0) {
			errors++;
			if (i == 0)
				primary_failed = true;
			continue;
		}
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    device->commit_total_bytes)
			break;

		page = find_get_page(device->bdev->bd_inode->i_mapping,
				     bytenr >> PAGE_SHIFT);
		if (!page) {
			errors++;
			if (i == 0)
				primary_failed = true;
			continue;
		}
		/* Page is submitted locked and unlocked once the IO completes */
		wait_on_page_locked(page);
		if (PageError(page)) {
			errors++;
			if (i == 0)
				primary_failed = true;
		}

		/* Drop our reference */
		put_page(page);

		/* Drop the reference from the writing run */
		put_page(page);
	}

	/* log error, force error return */
	if (primary_failed) {
		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
			  device->devid);
		return -1;
	}

	return errors < i ? 0 : -1;
}

/*
 * endio for the write_dev_flush, this will wake anyone waiting
 * for the barrier when it is done
 */
static void btrfs_end_empty_barrier(struct bio *bio)
{
	complete(bio->bi_private);
}

/*
 * Submit a flush request to the device if it supports it. Error handling is
 * done in the waiting counterpart.
 */
static void write_dev_flush(struct btrfs_device *device)
{
	struct bio *bio = device->flush_bio;

#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	/*
	 * When a disk has write caching disabled, we skip submission of a bio
	 * with flush and sync requests before writing the superblock, since
	 * it's not needed. However when the integrity checker is enabled, this
	 * results in reports that there are metadata blocks referred to by a
	 * superblock that were not properly flushed. So, for simplicity, only
	 * skip the bio submission when the integrity checker is not enabled,
	 * since it is a debug tool and not meant for use in non-debug builds.
	 */
	struct request_queue *q = bdev_get_queue(device->bdev);
	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return;
#endif

	bio_reset(bio);
	bio->bi_end_io = btrfs_end_empty_barrier;
	bio_set_dev(bio, device->bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;

	btrfsic_submit_bio(bio);
	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
}

/*
 * If the flush bio has been submitted by write_dev_flush, wait for it.
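 * Clears BTRFS_DEV_STATE_FLUSH_SENT, so calling this again without a new
 * write_dev_flush() is a no-op, and returns the status of the flush bio.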
 */
static blk_status_t wait_dev_flush(struct btrfs_device *device)
{
	struct bio *bio = device->flush_bio;

	if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
		return BLK_STS_OK;

	clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
	wait_for_completion_io(&device->flush_wait);

	return bio->bi_status;
}

static int check_barrier_error(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_check_rw_degradable(fs_info, NULL))
		return -EIO;
	return 0;
}

/*
 * Send an empty flush down to each device in parallel,
 * then wait for them.
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
	struct list_head *head;
	struct btrfs_device *dev;
	int errors_wait = 0;
	blk_status_t ret;

	lockdep_assert_held(&info->fs_devices->device_list_mutex);
	/* send down all the barriers */
	head = &info->fs_devices->devices;
	list_for_each_entry(dev, head, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
			continue;
		if (!dev->bdev)
			continue;
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		write_dev_flush(dev);
		dev->last_flush_error = BLK_STS_OK;
	}

	/* wait for all the barriers */
	list_for_each_entry(dev, head, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
			continue;
		if (!dev->bdev) {
			errors_wait++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		ret = wait_dev_flush(dev);
		if (ret) {
			dev->last_flush_error = ret;
			btrfs_dev_stat_inc_and_print(dev,
						     BTRFS_DEV_STAT_FLUSH_ERRS);
			errors_wait++;
		}
	}

	if (errors_wait) {
		/*
		 * At some point we need the status of all disks
		 * to arrive at the volume status. So error checking
		 * is being pushed to a separate loop.
		 */
		return check_barrier_error(info);
	}
	return 0;
}

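/*
 * For a given set of block group flags, return how many device (barrier)
 * failures can be tolerated, i.e. the minimum tolerated_failures over all
 * raid profiles present in @flags.
 */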
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
{
	int raid_type;
	int min_tolerated = INT_MAX;

	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
		min_tolerated = min_t(int, min_tolerated,
				      btrfs_raid_array[BTRFS_RAID_SINGLE].
				      tolerated_failures);

	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (raid_type == BTRFS_RAID_SINGLE)
			continue;
		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
			continue;
		min_tolerated = min_t(int, min_tolerated,
				      btrfs_raid_array[raid_type].
				      tolerated_failures);
	}

	if (min_tolerated == INT_MAX) {
		pr_warn("BTRFS: unknown raid flag: %llu", flags);
		min_tolerated = 0;
	}

	return min_tolerated;
}

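/*
 * Write the superblock to all writeable devices in the filesystem.
 *
 * Unless NOBARRIER is set, flushes are sent to every device first so that
 * all metadata written by the transaction is on stable storage before the
 * superblock that references it goes down. @max_mirrors is passed through
 * to write_dev_supers()/wait_dev_supers() and must match in both phases.
 */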
int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);

	/*
	 * max_mirrors == 0 indicates we're from commit_transaction,
	 * not from fsync, where the tree roots in fs_info may not be
	 * consistent on disk.
	 */
	if (max_mirrors == 0)
		backup_super_roots(fs_info);

	sb = fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	head = &fs_info->fs_devices->devices;
	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;

	if (do_barriers) {
		ret = barrier_all_devices(fs_info);
		if (ret) {
			mutex_unlock(
				&fs_info->fs_devices->device_list_mutex);
			btrfs_handle_fs_error(fs_info, ret,
					      "errors while submitting device barriers.");
			return ret;
		}
	}

	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item,
						   dev->commit_total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item,
						  dev->commit_bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
		       BTRFS_FSID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = btrfs_validate_write_super(fs_info, sb);
		if (ret < 0) {
			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
			btrfs_handle_fs_error(fs_info, -EUCLEAN,
				"unexpected superblock corruption detected");
			return -EUCLEAN;
		}

		ret = write_dev_supers(dev, sb, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		btrfs_err(fs_info, "%d errors while writing supers",
			  total_errors);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);

		/* FUA is masked off if unsupported and can't be the reason */
		btrfs_handle_fs_error(fs_info, -EIO,
				      "%d errors while writing supers",
				      total_errors);
		return -EIO;
	}

	total_errors = 0;
	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		ret = wait_dev_supers(dev, max_mirrors);
		if (ret)
			total_errors++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	if (total_errors > max_errors) {
		btrfs_handle_fs_error(fs_info, -EIO,
				      "%d errors while writing supers",
				      total_errors);
		return -EIO;
	}
	return 0;
}

/* Drop a fs root from the radix tree and free it. */
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
				  struct btrfs_root *root)
{
	bool drop_ref = false;

	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
		drop_ref = true;
	spin_unlock(&fs_info->fs_roots_radix_lock);

	if (BTRFS_FS_ERROR(fs_info)) {
		ASSERT(root->log_root == NULL);
		if (root->reloc_root) {
			btrfs_put_root(root->reloc_root);
			root->reloc_root = NULL;
		}
	}

	if (drop_ref)
		btrfs_put_root(root);
}

int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i = 0;
	int err = 0;
	unsigned int ret = 0;

	while (1) {
		spin_lock(&fs_info->fs_roots_radix_lock);
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			break;
		}
		root_objectid = gang[ret - 1]->root_key.objectid + 1;

		for (i = 0; i < ret; i++) {
			/* Avoid grabbing roots in dead_roots */
			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
				gang[i] = NULL;
				continue;
			}
			/* Grab all the search results for later use */
			gang[i] = btrfs_grab_root(gang[i]);
		}
		spin_unlock(&fs_info->fs_roots_radix_lock);

		for (i = 0; i < ret; i++) {
			if (!gang[i])
				continue;
			root_objectid = gang[i]->root_key.objectid;
			err = btrfs_orphan_cleanup(gang[i]);
			if (err)
				break;
			btrfs_put_root(gang[i]);
		}
		root_objectid++;
	}

	/* Release the uncleaned roots due to error */
	for (; i < ret; i++) {
		if (gang[i])
			btrfs_put_root(gang[i]);
	}
	return err;
}

int btrfs_commit_super(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;

	mutex_lock(&fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);
	wake_up_process(fs_info->cleaner_kthread);

	/* Wait until ongoing cleanup work is done */
	down_write(&fs_info->cleanup_work_sem);
	up_write(&fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans);
}

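/*
 * Tear down the filesystem: stop the background threads and workers, commit
 * a final transaction on read-write mounts, and release all roots, block
 * groups and devices.
 */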
void __cold close_ctree(struct btrfs_fs_info *fs_info)
{
	int ret;

	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
	/*
	 * We don't want the cleaner to start new transactions, add more delayed
	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
	 * because that frees the task_struct, and the transaction kthread might
	 * still try to wake up the cleaner.
	 */
	kthread_park(fs_info->cleaner_kthread);

	/* wait for the qgroup rescan worker to stop */
	btrfs_qgroup_wait_for_completion(fs_info, false);

	/* wait for the uuid_scan task to finish */
	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complaints from lockdep et al., set sem back to initial state */
	up(&fs_info->uuid_tree_rescan_sem);

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	cancel_work_sync(&fs_info->async_reclaim_work);
	cancel_work_sync(&fs_info->async_data_reclaim_work);
	cancel_work_sync(&fs_info->preempt_reclaim_work);

	cancel_work_sync(&fs_info->reclaim_bgs_work);

	/* Cancel or finish ongoing discard work */
	btrfs_discard_cleanup(fs_info);

	if (!sb_rdonly(fs_info->sb)) {
		/*
		 * The cleaner kthread is stopped, so do one final pass over
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);

		/*
		 * There might be existing delayed inode workers still running
		 * and holding an empty delayed inode item. We must wait for
		 * them to complete first because they can create a transaction.
		 * This happens when someone calls btrfs_balance_delayed_items()
		 * and then a transaction commit runs the same delayed nodes
		 * before any delayed worker has done something with the nodes.
		 * We must wait for any worker here and not at transaction
		 * commit time since that could cause a deadlock.
		 * This is a very rare case.
		 */
		btrfs_flush_workqueue(fs_info->delayed_workers);

		ret = btrfs_commit_super(fs_info);
		if (ret)
			btrfs_err(fs_info, "commit super ret %d", ret);
	}

	if (BTRFS_FS_ERROR(fs_info))
		btrfs_error_commit_super(fs_info);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	ASSERT(list_empty(&fs_info->delayed_iputs));
	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);

	if (btrfs_check_quota_leak(fs_info)) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		btrfs_err(fs_info, "qgroup reserved space leaked");
	}

	btrfs_free_qgroup_config(fs_info);
	ASSERT(list_empty(&fs_info->delalloc_roots));

	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
		btrfs_info(fs_info, "at unmount delalloc count %lld",
			   percpu_counter_sum(&fs_info->delalloc_bytes));
	}

	if (percpu_counter_sum(&fs_info->ordered_bytes))
		btrfs_info(fs_info, "at unmount dio bytes count %lld",
			   percpu_counter_sum(&fs_info->ordered_bytes));

	btrfs_sysfs_remove_mounted(fs_info);
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

	btrfs_put_block_group_cache(fs_info);

	/*
	 * We must make sure there is no read request submitted after we stop
	 * all workers.
	 */
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
	btrfs_stop_all_workers(fs_info);

	/* We shouldn't have any transaction open at this point */
	ASSERT(list_empty(&fs_info->trans_list));

	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
	free_root_pointers(fs_info, true);
	btrfs_free_fs_roots(fs_info);

	/*
	 * We must free the block groups after dropping the fs_roots as we could
	 * have had an IO error and have left over tree log blocks that aren't
	 * cleaned up until the fs roots are freed. This makes the block group
	 * accounting appear to be wrong because there's pending reserved bytes,
	 * so make sure we do the block group cleanup afterwards.
	 */
	btrfs_free_block_groups(fs_info);

	iput(fs_info->btree_inode);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
		btrfsic_unmount(fs_info->fs_devices);
#endif

	btrfs_mapping_tree_free(&fs_info->mapping_tree);
	btrfs_close_devices(fs_info->fs_devices);
}

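/*
 * Check that the given extent buffer is up to date and, when @parent_transid
 * is nonzero, that its generation matches what the parent node expects.
 * Returns 1 if uptodate, 0 if not, and -EAGAIN if @atomic is set and the
 * check would have to block.
 */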
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
	struct inode *btree_inode = buf->pages[0]->mapping->host;

	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/*
	 * This is a fast path so only do this check if we have sanity tests
	 * enabled. Normal people shouldn't be using unmapped buffers as dirty
	 * outside of the sanity tests.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
		return;
#endif
	btrfs_assert_tree_write_locked(buf);
	if (transid != fs_info->generation)
		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
			buf->start, transid, fs_info->generation);
	was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty)
		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
					 buf->len,
					 fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	/*
	 * btrfs_mark_buffer_dirty() can be called with the item pointer set
	 * but the item data not yet updated, so only check item pointers
	 * here, not item data.
	 */
	if (btrfs_header_level(buf) == 0 &&
	    btrfs_check_leaf_relaxed(buf)) {
		btrfs_print_leaf(buf);
		ASSERT(0);
	}
#endif
}

static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
					int flush_delayed)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	int ret;

	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(fs_info);

	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
				       BTRFS_DIRTY_METADATA_THRESH,
				       fs_info->dirty_metadata_batch);
	if (ret > 0) {
		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
	}
}

void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 0);
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
		      struct btrfs_key *first_key)
{
	return btree_read_extent_buffer_pages(buf, parent_transid,
					      level, first_key);
}

static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
{
	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(fs_info);

	mutex_lock(&fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);

	down_write(&fs_info->cleanup_work_sem);
	up_write(&fs_info->cleanup_work_sem);
}

static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *gang[8];
	u64 root_objectid = 0;
	int ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang))) != 0) {
		int i;

		for (i = 0; i < ret; i++)
			gang[i] = btrfs_grab_root(gang[i]);
		spin_unlock(&fs_info->fs_roots_radix_lock);

		for (i = 0; i < ret; i++) {
			if (!gang[i])
				continue;
			root_objectid = gang[i]->root_key.objectid;
			btrfs_free_log(NULL, gang[i]);
			btrfs_put_root(gang[i]);
		}
		root_objectid++;
		spin_lock(&fs_info->fs_roots_radix_lock);
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	btrfs_free_log_root_tree(NULL, fs_info);
}

static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&root->ordered_extent_lock);
	/*
	 * This will just short circuit the ordered completion stuff which will
	 * make sure the ordered extent gets properly cleaned up.
	 */
	list_for_each_entry(ordered, &root->ordered_extents,
			    root_extent_list)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
	spin_unlock(&root->ordered_extent_lock);
}

static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);

		spin_unlock(&fs_info->ordered_root_lock);
		btrfs_destroy_ordered_extents(root);

		cond_resched();
		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);

	/*
	 * We need this here because if we've been flipped read-only we won't
	 * get sync() from the umount, so we need to make sure any ordered
	 * extents that haven't had their dirty pages IO start writeout yet
	 * actually get run and error out properly.
	 */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}

static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (atomic_read(&delayed_refs->num_entries) == 0) {
		spin_unlock(&delayed_refs->lock);
		btrfs_debug(fs_info, "delayed_refs has NO entry");
		return ret;
	}

	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;
		struct rb_node *n;
		bool pin_bytes = false;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		if (btrfs_delayed_ref_lock(delayed_refs, head))
			continue;

		spin_lock(&head->lock);
		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
			ref = rb_entry(n, struct btrfs_delayed_ref_node,
				       ref_node);
			ref->in_tree = 0;
			rb_erase_cached(&ref->ref_node, &head->ref_tree);
			RB_CLEAR_NODE(&ref->ref_node);
			if (!list_empty(&ref->add_list))
				list_del(&ref->add_list);
			atomic_dec(&delayed_refs->num_entries);
			btrfs_put_delayed_ref(ref);
		}
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		btrfs_delete_ref_head(delayed_refs, head);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);

		if (pin_bytes) {
			struct btrfs_block_group *cache;

			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
			BUG_ON(!cache);

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			cache->pinned += head->num_bytes;
			btrfs_space_info_update_bytes_pinned(fs_info,
				cache->space_info, head->num_bytes);
			cache->reserved -= head->num_bytes;
			cache->space_info->bytes_reserved -= head->num_bytes;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			btrfs_put_block_group(cache);

			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
				head->bytenr + head->num_bytes - 1);
		}
		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
		btrfs_put_delayed_ref_head(head);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	btrfs_qgroup_destroy_extent_records(trans);

	spin_unlock(&delayed_refs->lock);

	return ret;
}

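/*
 * Drop every inode from the root's delalloc list and invalidate its pages,
 * so that no delalloc work is started for them during abort cleanup.
 */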
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		struct inode *inode = NULL;
		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);
		__btrfs_del_delalloc_inode(root, btrfs_inode);
		spin_unlock(&root->delalloc_lock);

		/*
		 * Make sure we get a live inode and that it'll not disappear
		 * meanwhile.
		 */
		inode = igrab(&btrfs_inode->vfs_inode);
		if (inode) {
			invalidate_inode_pages2(inode->i_mapping);
			iput(inode);
		}
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);
}

static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}

static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark, NULL);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark);
		while (start <= end) {
			eb = find_extent_buffer(fs_info, start);
			start += fs_info->nodesize;
			if (!eb)
				continue;
			wait_on_extent_buffer_writeback(eb);

			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
					       &eb->bflags))
				clear_extent_buffer_dirty(eb);
			free_extent_buffer_stale(eb);
		}
	}

	return ret;
}

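/*
 * Unpin every extent range still marked EXTENT_DIRTY in @unpin and return
 * it to the free space accounting. Used when a transaction is aborted and
 * btrfs_finish_extent_commit() will not get to run.
 */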
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *unpin)
{
	u64 start;
	u64 end;
	int ret;

	while (1) {
		struct extent_state *cached_state = NULL;

		/*
		 * The btrfs_finish_extent_commit() may get the same range as
		 * ours between find_first_extent_bit and clear_extent_dirty.
		 * Hence, hold the unused_bg_unpin_mutex to avoid double
		 * unpinning of the same extent range.
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, &cached_state);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}

		clear_extent_dirty(unpin, start, end, &cached_state);
		free_extent_state(cached_state);
		btrfs_error_unpin_extent_range(fs_info, start, end);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
	}

	return 0;
}

static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
{
	struct inode *inode;

	inode = cache->io_ctl.inode;
	if (inode) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		cache->io_ctl.inode = NULL;
		iput(inode);
	}
	ASSERT(cache->io_ctl.pages == NULL);
	btrfs_put_block_group(cache);
}

void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *cache;

	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group,
					 dirty_list);

		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_cleanup_bg_io(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		list_del_init(&cache->dirty_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);

		spin_unlock(&cur_trans->dirty_bgs_lock);
		btrfs_put_block_group(cache);
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of the io_bgs member for details on why
	 * it's safe to use it without any locking.
	 */
	while (!list_empty(&cur_trans->io_bgs)) {
		cache = list_first_entry(&cur_trans->io_bgs,
					 struct btrfs_block_group,
					 io_list);

		list_del_init(&cache->io_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);
		btrfs_cleanup_bg_io(cache);
	}
}

void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev, *tmp;

	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));

	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&dev->post_commit_list);
	}

	btrfs_destroy_delayed_refs(cur_trans, fs_info);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&fs_info->transaction_wait);

	btrfs_destroy_delayed_inodes(fs_info);

	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);

	btrfs_free_redirty_list(cur_trans);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
}

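/*
 * Abort and tear down every transaction still on fs_info->trans_list.
 * Transactions that already reached the commit phase are waited for, the
 * rest are cleaned up via btrfs_cleanup_one_transaction(). Called when the
 * mount fails or the filesystem is being torn down in an error state.
 */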
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *t;

	mutex_lock(&fs_info->transaction_kthread_mutex);

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&fs_info->trans_list)) {
		t = list_first_entry(&fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_START) {
			refcount_inc(&t->use_count);
			spin_unlock(&fs_info->trans_lock);
			btrfs_wait_for_commit(fs_info, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&fs_info->trans_lock);
			continue;
		}
		if (t == fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't hold a trans
			 * handle open currently for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, fs_info);

		spin_lock(&fs_info->trans_lock);
		if (t == fs_info->running_transaction)
			fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(fs_info->tree_root);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(fs_info);
	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);
	btrfs_destroy_all_delalloc_inodes(fs_info);
	btrfs_drop_all_logs(fs_info);
	mutex_unlock(&fs_info->transaction_kthread_mutex);

	return 0;
}

int btrfs_init_root_free_objectid(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
	search_key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
	BUG_ON(ret == 0); /* Corruption */
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		root->free_objectid = max_t(u64, found_key.objectid + 1,
					    BTRFS_FIRST_FREE_OBJECTID);
	} else {
		root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;

	mutex_lock(&root->objectid_mutex);

	if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		btrfs_warn(root->fs_info,
			   "the objectid of root %llu reaches its highest value",
			   root->root_key.objectid);
		ret = -ENOSPC;
		goto out;
	}

	*objectid = root->free_objectid++;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}