// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"

#define BTRFS_ROOT_TRANS_TAG 0

/*
 * Transaction states and transitions
 *
 * No running transaction (fs tree blocks are not modified)
 * |
 * | To next stage:
 * |  Call one of the start_transaction() variants, except
 * |  btrfs_join_transaction_nostart().
 * V
 * Transaction N [[TRANS_STATE_RUNNING]]
 * |
 * | New trans handles can be attached to transaction N by calling any of
 * | the start_transaction() variants.
 * |
 * | To next stage:
 * |  Call btrfs_commit_transaction() on any trans handle attached to
 * |  transaction N
 * V
 * Transaction N [[TRANS_STATE_COMMIT_START]]
 * |
 * | Will wait for the previous running transaction to completely finish
 * | if there is one.
 * |
 * | Then one of the following happens:
 * | - Wait for all other trans handle holders to release.
 * |   The btrfs_commit_transaction() caller will do the commit work.
 * | - Wait for the current transaction to be committed by others.
 * |   Another btrfs_commit_transaction() caller will do the commit work.
 * |
 * | At this stage, only btrfs_join_transaction*() variants can attach
 * | to this running transaction.
 * | All other variants will wait for the current one to finish and attach
 * | to transaction N+1.
 * |
 * | To next stage:
 * |  Caller is chosen to commit transaction N, and all other trans handles
 * |  have been released.
 * V
 * Transaction N [[TRANS_STATE_COMMIT_DOING]]
 * |
 * | The heavy lifting transaction work is started.
 * | From running delayed refs (modifying the extent tree) to creating
 * | pending snapshots and running qgroups.
 * | In short, modify the supporting trees to reflect the modifications of
 * | the subvolume trees.
 * |
 * | At this stage, all start_transaction() calls will wait for this
 * | transaction to finish and attach to transaction N+1.
 * |
 * | To next stage:
 * |  Until all supporting trees are updated.
 * V
 * Transaction N [[TRANS_STATE_UNBLOCKED]]
 * |                                              Transaction N+1
 * | All needed trees are modified, thus we only  [[TRANS_STATE_RUNNING]]
 * | need to write them back to disk and update   |
 * | super blocks.                                |
 * |                                              |
 * | At this stage, a new transaction is allowed  |
 * | to start.                                    |
 * | All new start_transaction() calls will be    |
 * | attached to transid N+1.                     |
 * |                                              |
 * | To next stage:                               |
 * |  Until all tree blocks and super blocks are  |
 * |  written to block devices.                   |
 * V                                              |
 * Transaction N [[TRANS_STATE_COMPLETED]]        V
 *  All tree blocks and super blocks are written. Transaction N+1
 *  This transaction is finished and all its      [[TRANS_STATE_COMMIT_START]]
 *  data structures will be cleaned up.           | Life goes on
 */
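
/*
 * A minimal usage sketch of the state machine above (illustrative only,
 * with error handling trimmed; "root" is whatever tree the caller is
 * modifying):
 *
 *	struct btrfs_trans_handle *trans;
 *
 *	trans = btrfs_start_transaction(root, 1);  // attach to N (RUNNING)
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	// ...modify fs tree blocks under transaction N...
 *	return btrfs_commit_transaction(trans);    // drive N to COMPLETED
 */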

static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
};

void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(refcount_read(&transaction->use_count) == 0);
	if (refcount_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.href_root.rb_root));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.dirty_extent_root));
		if (transaction->delayed_refs.pending_csums)
			btrfs_err(transaction->fs_info,
				  "pending csums is %llu",
				  transaction->delayed_refs.pending_csums);
		/*
		 * If any block groups are found in ->deleted_bgs then it's
		 * because the transaction was aborted and a commit did not
		 * happen (things failed before writing the new superblock
		 * and calling btrfs_finish_extent_commit()), so we can not
		 * discard the physical locations of the block groups.
		 */
		while (!list_empty(&transaction->deleted_bgs)) {
			struct btrfs_block_group *cache;

			cache = list_first_entry(&transaction->deleted_bgs,
						 struct btrfs_block_group,
						 bg_list);
			list_del_init(&cache->bg_list);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
		}
		WARN_ON(!list_empty(&transaction->dev_update_list));
		kfree(transaction);
	}
}

static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root, *tmp;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		if (is_fstree(root->root_key.objectid))
			btrfs_unpin_free_ino(root);
		extent_io_tree_release(&root->dirty_log_pages);
		btrfs_qgroup_clean_swapped_blocks(root);
	}

	/* We can free old roots now. */
	spin_lock(&cur_trans->dropped_roots_lock);
	while (!list_empty(&cur_trans->dropped_roots)) {
		root = list_first_entry(&cur_trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&cur_trans->dropped_roots_lock);
		btrfs_free_log(trans, root);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&cur_trans->dropped_roots_lock);
	}
	spin_unlock(&cur_trans->dropped_roots_lock);
	up_write(&fs_info->commit_root_sem);
}

static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}

/*
 * To be called after all the new block groups attached to the transaction
 * handle have been created (btrfs_create_pending_block_groups()).
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	WARN_ON_ONCE(!list_empty(&trans->new_bgs));

	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
				trans->chunk_bytes_reserved, NULL);
	trans->chunk_bytes_reserved = 0;
}

/*
 * Either allocate a new transaction or hop into the existing one.
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
				     unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (TRANS_ABORTED(cur_trans)) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		refcount_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL.
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * Someone started a transaction after we unlocked. Make sure
		 * to redo the checks above.
		 */
		kfree(cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kfree(cur_trans);
		return -EROFS;
	}

	cur_trans->fs_info = fs_info;
	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	refcount_set(&cur_trans->use_count, 2);
	cur_trans->flags = 0;
	cur_trans->start_time = ktime_get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);

	/*
	 * Although the tree mod log is per file system and not per
	 * transaction, the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->dev_update_list);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
			IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
	extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
			IO_TREE_FS_PINNED_EXTENTS, NULL);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}
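
/*
 * Illustrative lifetime of the two references taken above (a sketch, not
 * an exact call trace):
 *
 *	join_transaction()                  use_count == 2
 *	...modifications...
 *	btrfs_end_transaction()
 *	    btrfs_put_transaction()         use_count == 1
 *	btrfs_commit_transaction() (by whichever handle commits)
 *	    btrfs_put_transaction()         use_count == 0 -> kfree()
 *
 * Each additional handle that joins the running transaction takes and
 * later drops its own reference.
 */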

/*
 * This does all the record keeping required to make sure that a shareable
 * root is properly recorded in a given transaction. This is required to
 * make sure the old root from before we joined the transaction is deleted
 * when the transaction commits.
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       int force)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    root->last_trans < trans->transid) || force) {
		WARN_ON(root == fs_info->extent_root);
		WARN_ON(!force && root->commit_root != root->node);

		/*
		 * See below for the IN_TRANS_SETUP usage rules. We have the
		 * reloc mutex held now, so there is only one writer in this
		 * function.
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/*
		 * Make sure readers find IN_TRANS_SETUP before they find
		 * our root->last_trans update.
		 */
		smp_wmb();

		spin_lock(&fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid && !force) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/*
		 * This is pretty tricky. We don't want to take the relocation
		 * lock in btrfs_record_root_in_trans() unless we're really
		 * doing the first setup for this root in this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we init the
		 * relocation root, otherwise, we trip over warnings in
		 * ctree.c. The solution used here is to flag ourselves with
		 * root IN_TRANS_SETUP. When this is 1, we're still fixing
		 * up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans() without having to
		 * take the lock. smp_wmb() makes sure that all the writes
		 * above are done before we pop in the zero below.
		 */
		btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return 0;
}
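
/*
 * The barrier pairing above, as a sketch (writer is record_root_in_trans()
 * under reloc_mutex, reader is btrfs_record_root_in_trans() below):
 *
 *	writer				reader
 *	set_bit(IN_TRANS_SETUP)		smp_rmb()
 *	smp_wmb()			if (last_trans == transid &&
 *	last_trans = transid		    !test_bit(IN_TRANS_SETUP))
 *	btrfs_init_reloc_root()			return;  // fast path
 *	clear_bit(IN_TRANS_SETUP)
 *
 * A reader that observes the new last_trans is guaranteed to also observe
 * IN_TRANS_SETUP, so it falls back to taking reloc_mutex.
 */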

void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time */
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/*
	 * See record_root_in_trans() for comments about the IN_TRANS_SETUP
	 * usage and the barriers.
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&fs_info->reloc_mutex);
	record_root_in_trans(trans, root, 0);
	mutex_unlock(&fs_info->reloc_mutex);

	return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_COMMIT_START &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!TRANS_ABORTED(trans));
}

/*
 * Wait for a commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
	cur_trans = fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		refcount_inc(&cur_trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		wait_event(fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   TRANS_ABORTED(cur_trans));
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return 0;

	if (type == TRANS_START)
		return 1;

	return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush,
		  bool enforce_qgroups)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	bool do_chunk_alloc = false;
	int ret;

	/* Send isn't supposed to start transactions. */
	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		refcount_inc(&h->use_count);
		WARN_ON(refcount_read(&h->use_count) > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items && root != fs_info->chunk_root) {
		struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
		u64 delayed_refs_bytes = 0;

		qgroup_reserved = num_items * fs_info->nodesize;
		ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
				enforce_qgroups);
		if (ret)
			return ERR_PTR(ret);

		/*
		 * We want to reserve all the bytes we may need all at once, so
		 * we only do 1 enospc flushing cycle per transaction start. We
		 * accomplish this by simply assuming we'll do 2 x num_items
		 * worth of delayed refs updates in this trans handle, and
		 * refill that amount for whatever is missing in the reserve.
		 */
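		/*
		 * Illustrative numbers only (assuming a 16K nodesize and
		 * BTRFS_MAX_LEVEL of 8): for num_items == 1, the call below
		 * asks for 16K * 2 * 8 = 256K, and when the delayed refs rsv
		 * also needs refilling, the doubling raises the ticket to
		 * 512K before half is migrated back to the delayed refs rsv.
		 */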
		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
		if (flush == BTRFS_RESERVE_FLUSH_ALL &&
		    delayed_refs_rsv->full == 0) {
			delayed_refs_bytes = num_bytes;
			num_bytes <<= 1;
		}

		/*
		 * Do the reservation for the relocation root creation.
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += fs_info->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
		if (ret)
			goto reserve_fail;
		if (delayed_refs_bytes) {
			btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
							  delayed_refs_bytes);
			num_bytes -= delayed_refs_bytes;
		}

		if (rsv->space_info->force_alloc)
			do_chunk_alloc = true;
	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
		   !delayed_refs_rsv->full) {
		/*
		 * Some people call with btrfs_start_transaction(root, 0)
		 * because they can be throttled, but have some other mechanism
		 * for reserving space. We still want these guys to refill the
		 * delayed block_rsv so just add one item's worth of
		 * reservation here.
		 */
		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref. We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(fs_info->sb);

	if (may_wait_transaction(fs_info, type))
		wait_current_trans(fs_info);

	do {
		ret = join_transaction(fs_info, type);
		if (ret == -EBUSY) {
			wait_current_trans(fs_info);
			if (unlikely(type == TRANS_ATTACH ||
				     type == TRANS_JOIN_NOSTART))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0)
		goto join_fail;

	cur_trans = fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->root = root;
	refcount_set(&h->use_count, 1);
	h->fs_info = root->fs_info;

	h->type = type;
	h->can_flush_pending_bgs = true;
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
	    may_wait_transaction(fs_info, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}

got_it:
	if (!current->journal_info)
		current->journal_info = h;

	/*
	 * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
	 * ALLOC_FORCE the first run through, and then we won't allocate for
	 * anybody else who races in later. We don't care about the return
	 * value here.
	 */
	if (do_chunk_alloc && num_bytes) {
		u64 flags = h->block_rsv->space_info->flags;

		btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
				  CHUNK_ALLOC_NO_FORCE);
	}

	/*
	 * btrfs_record_root_in_trans() needs to alloc new extents, and may
	 * call btrfs_join_transaction() while we're also starting a
	 * transaction.
	 *
	 * Thus it needs to be called after current->journal_info is
	 * initialized, or we can deadlock.
	 */
	btrfs_record_root_in_trans(h, root);

	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
					num_bytes, NULL);
reserve_fail:
	btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL, true);
}
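
/*
 * Typical caller pattern for btrfs_start_transaction() (an illustrative
 * sketch; do_modifications() is a made-up helper, and real callers decide
 * per failure whether btrfs_abort_transaction() is warranted):
 *
 *	trans = btrfs_start_transaction(root, 2);  // room for two items
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = do_modifications(trans);
 *	if (ret)
 *		btrfs_abort_transaction(trans, ret);
 *	return btrfs_end_transaction(trans);
 */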

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
				 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Similar to a regular join but it never starts a transaction when none is
 * running or after waiting for the current one to finish.
 */
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke btrfs_attach_transaction_barrier().
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is that this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH,
				  BTRFS_RESERVE_NO_FLUSH, true);
	if (trans == ERR_PTR(-ENOENT))
		btrfs_wait_for_commit(root->fs_info, 0);

	return trans;
}
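
/*
 * Illustrative difference between the two attach variants (a sketch of a
 * sync-style caller, not a real call site):
 *
 *	trans = btrfs_attach_transaction_barrier(root);
 *	if (IS_ERR(trans)) {
 *		if (PTR_ERR(trans) == -ENOENT)
 *			return 0;  // nothing running, nothing in flight
 *		return PTR_ERR(trans);
 *	}
 *	return btrfs_commit_transaction(trans);
 *
 * With plain btrfs_attach_transaction(), -ENOENT only means no transaction
 * is running; a previous one could still be writing to disk.
 */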

/* Wait for a transaction commit to be fully complete. */
static noinline void wait_for_commit(struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry(t, &fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction
		 */
		if (!cur_trans) {
			if (transid > fs_info->last_trans_committed)
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* find newest transaction that is committing or committed */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry_reverse(t, &fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing or committed */
	}

	wait_for_commit(cur_trans);
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
	wait_current_trans(fs_info);
}

static int should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (btrfs_check_space_for_delayed_refs(fs_info))
		return 1;

	return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
	    cur_trans->delayed_refs.flushing)
		return 1;

	return should_end_transaction(trans);
}
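
/*
 * Sketch of how btrfs_should_end_transaction() is meant to be used by a
 * long-running loop (illustrative; process_one_item() is a made-up helper):
 *
 *	while (have_work) {
 *		process_one_item(trans);
 *		if (btrfs_should_end_transaction(trans)) {
 *			btrfs_end_transaction(trans);
 *			trans = btrfs_start_transaction(root, 1);
 *			if (IS_ERR(trans))
 *				return PTR_ERR(trans);
 *		}
 *	}
 */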

static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->block_rsv) {
		ASSERT(!trans->bytes_reserved);
		return;
	}

	if (!trans->bytes_reserved)
		return;

	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved, NULL);
	trans->bytes_reserved = 0;
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   int throttle)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int err = 0;

	if (refcount_read(&trans->use_count) > 1) {
		refcount_dec(&trans->use_count);
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	btrfs_create_pending_block_groups(trans);

	btrfs_trans_release_chunk_metadata(trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	cond_wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(info);

	if (TRANS_ABORTED(trans) ||
	    test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
		wake_up_process(info->transaction_kthread);
		err = -EIO;
	}

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 1);
}

/*
 * When btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of those
 * extents are sent to disk but does not wait on them.
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error. So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to __btrfs_wait_marked_extents() would not know that
		 * writeback for this range started and therefore wouldn't
		 * wait for it to finish - we don't want to commit a
		 * superblock that points to btree nodes/leaves for which
		 * writeback hasn't finished yet (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through extent_io_tree_release()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	return werr;
}

/*
 * When btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of those
 * extents are on disk for transaction or log commit. We wait on all the
 * pages and clear them from the dirty pages state tree.
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *dirty_pages)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through extent_io_tree_release()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT, 0, 0, &cached_state);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
			      struct extent_io_tree *dirty_pages)
{
	bool errors = false;
	int err;

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}

int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
	struct btrfs_fs_info *fs_info = log_root->fs_info;
	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
	bool errors = false;
	int err;

	ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if ((mark & EXTENT_DIRTY) &&
	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
		errors = true;

	if ((mark & EXTENT_NEW) &&
	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}

/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
	int ret;
	int ret2;
	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_extents(fs_info, dirty_pages);

	extent_io_tree_release(&trans->transaction->dirty_pages);

	if (ret)
		return ret;
	else if (ret2)
		return ret2;
	else
		return 0;
}
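
/*
 * Lifecycle of a dirty btree range through the helpers above (a sketch;
 * the mark is EXTENT_DIRTY for transaction commits and EXTENT_DIRTY or
 * EXTENT_NEW for log commits):
 *
 *	allocation:  range marked dirty in the io tree
 *	write:       btrfs_write_marked_extents()
 *	                 mark -> EXTENT_NEED_WAIT, filemap_fdatawrite_range()
 *	wait:        __btrfs_wait_marked_extents()
 *	                 EXTENT_NEED_WAIT cleared, filemap_fdatawait_range()
 */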

/*
 * This is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * Update all the cowonly tree roots on disk.
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans);
	if (ret)
		return ret;

	/* btrfs_run_qgroups() might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		return ret;
again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		struct btrfs_root *root;

		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
		if (ret)
			return ret;
	}

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);

	/* Update dev-replace pointer once everything is committed */
	fs_info->dev_replace.committed_cursor_left =
		fs_info->dev_replace.cursor_left_last_write_of_item;

	return 0;
}

/*
 * Dead roots are old snapshots that need to be deleted. This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted.
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&root->root_list)) {
		btrfs_grab_root(root);
		list_add_tail(&root->root_list, &fs_info->dead_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * Commit all the dirty fs/file tree roots: update their root items and
 * queue them for a commit-root switch.
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *gang[8];
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			struct btrfs_root *root = gang[i];

			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
			btrfs_qgroup_free_meta_all_pertrans(root);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * Defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(info);
		cond_resched();

		if (btrfs_fs_closing(info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(info)) {
			btrfs_debug(info, "defrag_root cancelled");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}

/*
 * Do all the special, snapshot-related qgroup accounting.
 *
 * This does the needed qgroup inheritance, and the same "dirty hack" as a
 * full commit: switch the commit roots and write all modified btrees to
 * disk inside one transaction, so that the qgroup numbers stay correct.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_root *src,
				   struct btrfs_root *parent,
				   struct btrfs_qgroup_inherit *inherit,
				   u64 dst_objectid)
{
	struct btrfs_fs_info *fs_info = src->fs_info;
	int ret;

	/*
	 * Save some performance in the case that qgroups are not
	 * enabled. If this check races with the ioctl, rescan will
	 * kick in anyway.
	 */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/*
	 * Ensure the dirty @src will be committed. Otherwise, after the
	 * coming commit_fs_roots() and switch_commit_roots(), any dirty but
	 * not recorded root will never be updated again, leaving an outdated
	 * root item.
	 */
	record_root_in_trans(trans, src, 1);

	/*
	 * We are going to commit the transaction; see the comment in
	 * btrfs_commit_transaction() for why we lock tree_log_mutex.
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans);
	if (ret)
		goto out;
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto out;

	/* Now the qgroups are all updated, we can inherit to the new one. */
	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
				   inherit);
	if (ret < 0)
		goto out;

	/*
	 * Now we do a simplified commit transaction, which will:
	 * 1) commit all the subvolume and extent trees,
	 *    to ensure they have valid commit_roots for the later
	 *    insert_dir_item() to account against;
	 * 2) write all btree blocks to disk,
	 *    to make sure later btree modifications get COWed; otherwise a
	 *    stale commit_root could produce wrong qgroup numbers.
	 * In this simplified commit, we don't really care about other trees
	 * like the chunk and root trees, as they won't affect qgroups.
	 * And we don't write the super block, to avoid a half-committed
	 * state.
	 */
	ret = commit_cowonly_roots(trans);
	if (ret)
		goto out;
	switch_commit_roots(trans);
	ret = btrfs_write_and_wait_transaction(trans);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret,
			"Error while writing out transaction for qgroup");

out:
	mutex_unlock(&fs_info->tree_log_mutex);

	/*
	 * Force the parent root to be updated: we recorded it before, so its
	 * last_trans == cur_transid. Otherwise it wouldn't be committed to
	 * disk again after the later insert_dir_item().
	 */
	if (!ret)
		record_root_in_trans(trans, parent, 1);
	return ret;
}

/*
 * New snapshots need to be created at a very specific time in the
 * transaction commit. This does the actual creation.
 *
 * Note:
 * If an error occurs that may affect the commit of the current transaction,
 * we return the error number. If an error affects only the creation of this
 * pending snapshot, we record it in pending->error and return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec64 cur_time;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	ASSERT(pending->path);
	path = pending->path;

	ASSERT(pending->root_item);
	new_root_item = pending->root_item;

	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	/*
	 * Make qgroups skip the new snapshot's qgroupid, as it will be
	 * accounted for by the later btrfs_qgroup_inherit().
	 */
	btrfs_set_skip_qgroup(trans, objectid);

	btrfs_reloc_pre_snapshot(pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto clear_skip_qgroup;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid,
				      trans->bytes_reserved, 1);
	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root, 0);

	cur_time = current_time(parent_inode);

	/*
	 * Insert the directory item.
	 */
	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
	BUG_ON(ret); /* -ENOMEM */

	/* Check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(BTRFS_I(parent_inode)),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * Pull in the delayed directory update and the delayed inode item,
	 * otherwise we corrupt the FS during snapshot creation.
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	record_root_in_trans(trans, root, 0);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	generate_random_guid(new_root_item->uuid);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_set_lock_blocking_write(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * Insert root back/forward references.
	 */
	ret = btrfs_add_root_ref(trans, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(BTRFS_I(parent_inode)), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_get_fs_root(fs_info, objectid, true);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * Do the special qgroup accounting for the snapshot now, as part of
	 * the fast-snapshot qgroup hack. Otherwise the snapshot would be
	 * greatly slowed down by a subtree qgroup rescan later.
	 */
	ret = qgroup_account_snapshot(trans, root, parent_root,
				      pending->inherit, objectid);
	if (ret < 0)
		goto fail;

	ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
				    dentry->d_name.len, BTRFS_I(parent_inode),
				    &key, BTRFS_FT_DIR, index);
	/* We already checked the name at the beginning, so this is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime =
		current_time(parent_inode);
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
				  BTRFS_UUID_KEY_SUBVOL,
				  objectid);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, ret);
			goto fail;
		}
	}

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
no_free_objectid:
	kfree(new_root_item);
	pending->root_item = NULL;
	btrfs_free_path(path);
	pending->path = NULL;

	return ret;
}

/*
 * Create all the snapshots we've scheduled for creation.
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, pending);
		if (ret)
			break;
	}
	return ret;
}

static void update_super_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = fs_info->super_copy;

	root_item = &fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(fs_info, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
		super->uuid_tree_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = (trans->state >= TRANS_STATE_COMMIT_START);
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * Wait for the current transaction commit to start and block
 * subsequent transaction joins.
 */
static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
					    struct btrfs_transaction *trans)
{
	wait_event(fs_info->transaction_blocked_wait,
		   trans->state >= TRANS_STATE_COMMIT_START ||
		   TRANS_ABORTED(trans));
}

/*
 * Wait for the current transaction to start and then become unblocked.
 * The caller holds a reference.
 */
static void wait_current_trans_commit_start_and_unblock(
					struct btrfs_fs_info *fs_info,
					struct btrfs_transaction *trans)
{
	wait_event(fs_info->transaction_wait,
		   trans->state >= TRANS_STATE_UNBLOCKED ||
		   TRANS_ABORTED(trans));
}

/*
 * Commit transactions asynchronously. Once btrfs_commit_transaction_async()
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   int wait_for_unblock)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->newtrans = btrfs_join_transaction(trans->root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);

		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	refcount_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_release(fs_info->sb, SB_FREEZE_FS);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
	else
		wait_current_trans_commit_start(fs_info, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}
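
/*
 * Usage sketch for the async commit machinery above (modeled loosely on a
 * snapshot-creation style caller; illustrative only):
 *
 *	trans = btrfs_start_transaction(root, num_items);
 *	...queue pending work on trans->transaction...
 *	ret = btrfs_commit_transaction_async(trans, 1);
 *	// Returns once the commit has passed TRANS_STATE_UNBLOCKED; the
 *	// worker finishes writing the transaction out in the background.
 */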

static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(refcount_read(&trans->use_count) > 1);

	btrfs_abort_transaction(trans, err);

	spin_lock(&fs_info->trans_lock);

	/*
	 * If the transaction is removed from the list, it means this
	 * transaction has been committed successfully, so it is impossible
	 * to call the cleanup function.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	list_del_init(&cur_trans->list);
	if (cur_trans == fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&fs_info->trans_lock);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, fs_info);

	spin_lock(&fs_info->trans_lock);
	if (cur_trans == fs_info->running_transaction)
		fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(trans->root);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	btrfs_scrub_cancel(fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

/*
 * Release reserved delayed ref space of all pending block groups of the
 * transaction and remove them from the list
 */
static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *block_group, *tmp;

	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		list_del_init(&block_group->bg_list);
	}
}

static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	/*
	 * We use writeback_inodes_sb here because if we used
	 * btrfs_start_delalloc_roots we would deadlock with fs freeze.
	 * We are currently holding the fs freeze lock, and if we do an async
	 * flush we'll do btrfs_join_transaction() and deadlock because we
	 * need to wait for the fs freeze lock. Using the direct flushing we
	 * benefit from already being in a transaction and our
	 * join_transaction doesn't have to re-take the fs freeze lock.
	 */
	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
		writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
	} else {
		struct btrfs_pending_snapshot *pending;
		struct list_head *head = &trans->transaction->pending_snapshots;

		/*
		 * Flush delalloc for any root that is going to be snapshotted.
		 * This is done to avoid a corrupted version of files in the
		 * snapshots that had both buffered and direct IO writes (even
		 * if they were done sequentially) due to an unordered update
		 * of the inode's size on disk.
		 */
		list_for_each_entry(pending, head, list) {
			int ret;

			ret = btrfs_start_delalloc_snapshot(pending->root);
			if (ret)
				return ret;
		}
	}
	return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
	} else {
		struct btrfs_pending_snapshot *pending;
		struct list_head *head = &trans->transaction->pending_snapshots;

		/*
		 * Wait for any delalloc that we started previously for the
		 * roots that are going to be snapshotted. This is to avoid a
		 * corrupted version of files in the snapshots that had both
		 * buffered and direct IO writes (even if they were done
		 * sequentially).
		 */
		list_for_each_entry(pending, head, list)
			btrfs_wait_ordered_extents(pending->root,
						   U64_MAX, 0, U64_MAX);
	}
}
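
/*
 * Commit the current transaction. In broad strokes this walks the state
 * machine documented at the top of this file: run delayed refs and flush
 * delalloc, wait for all other handle holders to drop off, create pending
 * snapshots, commit the fs roots and the cow-only trees, switch the commit
 * roots, write the dirty tree blocks and super blocks, and finally tear the
 * transaction down. Any failure aborts the transaction and unwinds through
 * the labels at the bottom of the function.
 */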
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	int ret;

	ASSERT(refcount_read(&trans->use_count) == 1);

	/*
	 * Some places just start a transaction to commit it. We need to make
	 * sure that, if this commit fails, the abort code actually marks the
	 * transaction as failed, so set trans->dirty to make the abort code
	 * do the right thing.
	 */
	trans->dirty = true;

	/* Stop the commit early if ->aborted is set */
	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans);
		return ret;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	/*
	 * Make a pass through all the delayed refs we have so far; any
	 * running procs may add more while we are here.
	 */
	ret = btrfs_run_delayed_refs(trans, 0);
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;
	smp_wmb();

	btrfs_create_pending_block_groups(trans);

	ret = btrfs_run_delayed_refs(trans, 0);
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}

	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
		int run_it = 0;

		/*
		 * This mutex is also taken before trying to set block groups
		 * readonly. We need to make sure that nobody has set a block
		 * group readonly after extents from that block group have
		 * been allocated for cache files. btrfs_set_block_group_ro
		 * will wait for the transaction to commit if it finds
		 * BTRFS_TRANS_DIRTY_BG_RUN set.
		 *
		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
		 * only one process starts all the block group IO. It wouldn't
		 * hurt to have more than one go through, but there's no
		 * real advantage to it either.
		 */
		mutex_lock(&fs_info->ro_block_group_mutex);
		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
				      &cur_trans->flags))
			run_it = 1;
		mutex_unlock(&fs_info->ro_block_group_mutex);

		if (run_it) {
			ret = btrfs_start_dirty_block_groups(trans);
			if (ret) {
				btrfs_end_transaction(trans);
				return ret;
			}
		}
	}
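
	/*
	 * If another task already moved this transaction past
	 * TRANS_STATE_COMMIT_START, there is nothing for us to do but wait:
	 * take an extra reference so the transaction can't go away, release
	 * our handle and block until the commit completes. Otherwise we
	 * become the committing task below.
	 */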
	spin_lock(&fs_info->trans_lock);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		spin_unlock(&fs_info->trans_lock);
		refcount_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans);

		wait_for_commit(cur_trans);

		if (TRANS_ABORTED(cur_trans))
			ret = cur_trans->aborted;

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state != TRANS_STATE_COMPLETED) {
			refcount_inc(&prev_trans->use_count);
			spin_unlock(&fs_info->trans_lock);

			wait_for_commit(prev_trans);
			ret = READ_ONCE(prev_trans->aborted);

			btrfs_put_transaction(prev_trans);
			if (ret)
				goto cleanup_transaction;
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
	} else {
		spin_unlock(&fs_info->trans_lock);
		/*
		 * The previous transaction was aborted and was already removed
		 * from the list of transactions at fs_info->trans_list. So we
		 * abort to prevent writing a new superblock that reflects a
		 * corrupt state (pointing to trees with unwritten nodes or
		 * leaves).
		 */
		if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
			ret = -EROFS;
			goto cleanup_transaction;
		}
	}

	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(trans);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto cleanup_transaction;

	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* Some pending stuff might be added after the previous flush. */
	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(trans);
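
	/*
	 * Pause scrub for the duration of the commit critical section; it is
	 * resumed by btrfs_scrub_continue() once the super blocks are
	 * written, or on the error paths via the scrub_continue label below.
	 */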
	btrfs_scrub_pause(fs_info);
	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction. We could have started a join before setting
	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans);
	if (ret)
		goto unlock_reloc;

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with. Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * their nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto unlock_reloc;

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		goto unlock_reloc;

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(fs_info);

	WARN_ON(cur_trans != trans->transaction);

	/*
	 * commit_cowonly_roots() is responsible for getting the various roots
	 * consistent with each other. Every pointer in the tree of tree roots
	 * has to point to the most up to date root for every subvolume and
	 * other tree. So, we have to keep the tree logging code from jumping
	 * in and changing any of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log writers,
	 * but a little lower down we drop the trans mutex and let new people
	 * in. By holding the tree_log_mutex from now until after the super
	 * is written, we avoid races with the tree-log code.
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans);
	if (ret)
		goto unlock_tree_log;

	/*
	 * Since the transaction is done, we can apply the pending changes
	 * before the next transaction.
	 */
	btrfs_apply_pending_changes(fs_info);

	/*
	 * commit_fs_roots() gets rid of all the tree log roots, so it is now
	 * safe to free the log root tree.
	 */
	btrfs_free_log_root_tree(trans, fs_info);

	/*
	 * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
	 * new delayed refs. These must be handled or qgroup accounting can
	 * be wrong.
	 */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		goto unlock_tree_log;

	/*
	 * Since the fs roots are all committed, we can get a quite accurate
	 * set of new_roots. So let's do the qgroup accounting.
	 */
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto unlock_tree_log;
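
	/*
	 * Now commit the cow-only trees (extent tree, csum tree, device tree
	 * and friends) so they are consistent with the fs roots committed
	 * above.
	 */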
	ret = commit_cowonly_roots(trans);
	if (ret)
		goto unlock_tree_log;

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		goto unlock_tree_log;
	}

	btrfs_prepare_extent_commit(fs_info);

	cur_trans = fs_info->running_transaction;

	btrfs_set_root_node(&fs_info->tree_root->root_item,
			    fs_info->tree_root->node);
	list_add_tail(&fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&fs_info->chunk_root->root_item,
			    fs_info->chunk_root->node);
	list_add_tail(&fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	switch_commit_roots(trans);

	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));
	update_super_roots(fs_info);

	btrfs_set_super_log_root(fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_copy));

	btrfs_commit_device_sizes(cur_trans);

	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);

	btrfs_trans_release_chunk_metadata(trans);

	spin_lock(&fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);
	mutex_unlock(&fs_info->reloc_mutex);

	wake_up(&fs_info->transaction_wait);
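
	/*
	 * From here on new transactions are allowed to start while we write
	 * this one out. Write and wait on all dirty tree blocks of this
	 * transaction, then write out every copy of the super block.
	 */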
	ret = btrfs_write_and_wait_transaction(trans);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Error while writing out transaction");
		/*
		 * The reloc_mutex has already been unlocked and the
		 * tree_log_mutex is still held; we can't jump to
		 * unlock_tree_log as that would cause a double unlock.
		 */
		mutex_unlock(&fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_all_supers(fs_info, 0);
	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&fs_info->tree_log_mutex);
	if (ret)
		goto scrub_continue;

	btrfs_finish_extent_commit(trans);

	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
		btrfs_clear_space_info_full(fs_info);

	fs_info->last_trans_committed = cur_trans->transid;
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
	clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);

	spin_lock(&fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);

	trace_btrfs_transaction_commit(trans->root);

	btrfs_scrub_continue(fs_info);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	return ret;

unlock_tree_log:
	mutex_unlock(&fs_info->tree_log_mutex);
unlock_reloc:
	mutex_unlock(&fs_info->reloc_mutex);
scrub_continue:
	btrfs_scrub_continue(fs_info);
cleanup_transaction:
	btrfs_trans_release_metadata(trans);
	btrfs_cleanup_pending_block_groups(trans);
	btrfs_trans_release_chunk_metadata(trans);
	trans->block_rsv = NULL;
	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, ret);

	return ret;
}

/*
 * Returns:
 * 0 - there were no more dead roots at the time of the call (errors from
 *     dropping a snapshot are also folded into this case)
 * 1 - there are more dead roots to be processed, call me again
 *
 * A return of 1 means there are certainly more snapshots to delete, but if a
 * new one arrives during processing we may still return 0. We don't mind,
 * because btrfs_commit_super will poke the cleaner thread and it will process
 * it a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
				struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);

	btrfs_kill_all_delayed_nodes(root);
	if (root->ino_cache_inode) {
		iput(root->ino_cache_inode);
		root->ino_cache_inode = NULL;
	}

	if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, 1, 0);

	btrfs_put_root(root);
	return (ret < 0) ? 0 : 1;
}

void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
{
	unsigned long prev;
	unsigned long bit;

	prev = xchg(&fs_info->pending_changes, 0);
	if (!prev)
		return;

	bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_COMMIT;
	if (prev & bit)
		btrfs_debug(fs_info, "pending commit done");
	prev &= ~bit;

	if (prev)
		btrfs_warn(fs_info,
			"unknown pending changes left 0x%lx, ignoring", prev);
}
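
/*
 * Illustrative sketch (not part of the original file): the intended consumer
 * of btrfs_clean_one_deleted_snapshot() is a cleaner-style loop that keeps
 * calling it until it reports that the dead roots list is empty:
 *
 *	while (btrfs_clean_one_deleted_snapshot(root))
 *		cond_resched();
 *
 * The loop shape and the cond_resched() are assumptions made for this
 * example; the real caller is the cleaner kthread in disk-io.c.
 */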