// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
#include "block-group.h"

#define BTRFS_ROOT_TRANS_TAG 0

/*
 * Transaction states and transitions
 *
 * No running transaction (fs tree blocks are not modified)
 * |
 * | To next stage:
 * |  Call start_transaction() variants. Except btrfs_join_transaction_nostart().
 * V
 * Transaction N [[TRANS_STATE_RUNNING]]
 * |
 * | New trans handles can be attached to transaction N by calling all
 * | start_transaction() variants.
 * |
 * | To next stage:
 * |  Call btrfs_commit_transaction() on any trans handle attached to
 * |  transaction N
 * V
 * Transaction N [[TRANS_STATE_COMMIT_START]]
 * |
 * | Will wait for previous running transaction to completely finish if there
 * | is one
 * |
 * | Then one of the following happens:
 * | - Wait for all other trans handle holders to release.
 * |   The btrfs_commit_transaction() caller will do the commit work.
 * | - Wait for current transaction to be committed by others.
 * |   Other btrfs_commit_transaction() caller will do the commit work.
 * |
 * | At this stage, only btrfs_join_transaction*() variants can attach
 * | to this running transaction.
 * | All other variants will wait for current one to finish and attach to
 * | transaction N+1.
 * |
 * | To next stage:
 * |  Caller is chosen to commit transaction N, and all other trans handles
 * |  have been released.
 * V
 * Transaction N [[TRANS_STATE_COMMIT_DOING]]
 * |
 * | The heavy lifting transaction work is started.
 * | From running delayed refs (modifying extent tree) to creating pending
 * | snapshots, running qgroups.
 * | In short, modify supporting trees to reflect modifications of subvolume
 * | trees.
 * |
 * | At this stage, all start_transaction() calls will wait for this
 * | transaction to finish and attach to transaction N+1.
 * |
 * | To next stage:
 * |  Until all supporting trees are updated.
 * V
 * Transaction N [[TRANS_STATE_UNBLOCKED]]
 * |                                                Transaction N+1
 * | All needed trees are modified, thus we only    [[TRANS_STATE_RUNNING]]
 * | need to write them back to disk and update     |
 * | super blocks.                                  |
 * |                                                |
 * | At this stage, new transaction is allowed to   |
 * | start.                                         |
 * | All new start_transaction() calls will be      |
 * | attached to transid N+1.                       |
 * |                                                |
 * | To next stage:                                 |
 * |  Until all tree blocks and super blocks are    |
 * |  written to block devices                      |
 * V                                                |
 * Transaction N [[TRANS_STATE_COMPLETED]]          V
 * All tree blocks and super blocks are written.    Transaction N+1
 * This transaction is finished and all its         [[TRANS_STATE_COMMIT_START]]
 * data structures will be cleaned up.              | Life goes on
 */
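
/*
 * A worked reading of the table below (derived directly from the diagram
 * above): while a transaction is in TRANS_STATE_COMMIT_DOING, a TRANS_JOIN
 * attempt is refused (join_transaction() returns -EBUSY) but
 * TRANS_JOIN_NOLOCK still gets in; once the state reaches
 * TRANS_STATE_UNBLOCKED, even TRANS_JOIN_NOLOCK has to attach to the next
 * transaction instead.
 */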

static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
};

void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(refcount_read(&transaction->use_count) == 0);
	if (refcount_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.href_root.rb_root));
		if (transaction->delayed_refs.pending_csums)
			btrfs_err(transaction->fs_info,
				  "pending csums is %llu",
				  transaction->delayed_refs.pending_csums);
		/*
		 * If any block groups are found in ->deleted_bgs then it's
		 * because the transaction was aborted and a commit did not
		 * happen (things failed before writing the new superblock
		 * and calling btrfs_finish_extent_commit()), so we can not
		 * discard the physical locations of the block groups.
		 */
		while (!list_empty(&transaction->deleted_bgs)) {
			struct btrfs_block_group *cache;

			cache = list_first_entry(&transaction->deleted_bgs,
						 struct btrfs_block_group,
						 bg_list);
			list_del_init(&cache->bg_list);
			btrfs_put_block_group_trimming(cache);
			btrfs_put_block_group(cache);
		}
		WARN_ON(!list_empty(&transaction->dev_update_list));
		kfree(transaction);
	}
}

static noinline void switch_commit_roots(struct btrfs_transaction *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root, *tmp;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		if (is_fstree(root->root_key.objectid))
			btrfs_unpin_free_ino(root);
		extent_io_tree_release(&root->dirty_log_pages);
		btrfs_qgroup_clean_swapped_blocks(root);
	}

	/* We can free old roots now. */
	spin_lock(&trans->dropped_roots_lock);
	while (!list_empty(&trans->dropped_roots)) {
		root = list_first_entry(&trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&trans->dropped_roots_lock);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&trans->dropped_roots_lock);
	}
	spin_unlock(&trans->dropped_roots_lock);
	up_write(&fs_info->commit_root_sem);
}

static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}
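
/*
 * Note on the helpers above (a summary, assuming TRANS_EXTWRITERS covers
 * __TRANS_START and __TRANS_ATTACH as defined in transaction.h): the
 * extwriter count tracks handles that may dirty new data from outside the
 * commit path.  btrfs_commit_transaction() waits for
 * extwriter_counter_read() to drop to zero while it flushes delalloc,
 * whereas plain joins only bump num_writers.
 */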

/*
 * To be called after all the new block groups attached to the transaction
 * handle have been created (btrfs_create_pending_block_groups()).
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	WARN_ON_ONCE(!list_empty(&trans->new_bgs));

	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
				trans->chunk_bytes_reserved);
	trans->chunk_bytes_reserved = 0;
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
				     unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		refcount_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked. Make sure
		 * to redo the checks above
		 */
		kfree(cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kfree(cur_trans);
		return -EROFS;
	}

	cur_trans->fs_info = fs_info;
	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	refcount_set(&cur_trans->use_count, 2);
	cur_trans->flags = 0;
	cur_trans->start_time = ktime_get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->dev_update_list);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
			IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction. This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				int force)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    root->last_trans < trans->transid) || force) {
		WARN_ON(root == fs_info->extent_root);
		WARN_ON(!force && root->commit_root != root->node);

		/*
		 * see below for IN_TRANS_SETUP usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/* make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid && !force) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return 0;
}
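
/*
 * A compact sketch of the ordering contract described above (writer side is
 * record_root_in_trans(), reader side is btrfs_record_root_in_trans() below):
 *
 *	writer					reader
 *	set_bit(IN_TRANS_SETUP)			smp_rmb();
 *	smp_wmb();				if (last_trans == transid &&
 *	root->last_trans = transid;		    !test_bit(IN_TRANS_SETUP))
 *	...					        take the fast path
 *	clear_bit(IN_TRANS_SETUP);
 */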

void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time */
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	/*
	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&fs_info->reloc_mutex);
	record_root_in_trans(trans, root, 0);
	mutex_unlock(&fs_info->reloc_mutex);

	return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_COMMIT_START &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!trans->aborted);
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
	cur_trans = fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		refcount_inc(&cur_trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		wait_event(fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   cur_trans->aborted);
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return 0;

	if (type == TRANS_START)
		return 1;

	return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}
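
/*
 * Worked example for the reservation sizing in start_transaction() below (a
 * sketch; assumes btrfs_calc_insert_metadata_size() is
 * nodesize * BTRFS_MAX_LEVEL * 2 * num_items, as defined in ctree.h at the
 * time of writing): with a 16K nodesize and num_items == 1, num_bytes starts
 * at 256K; if the delayed refs rsv is not full, it is doubled to 512K and
 * half of it is migrated to the delayed refs rsv once the reservation
 * succeeds.
 */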

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush,
		  bool enforce_qgroups)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	int ret;

	/* Send isn't supposed to start transactions. */
	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		refcount_inc(&h->use_count);
		WARN_ON(refcount_read(&h->use_count) > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items && root != fs_info->chunk_root) {
		struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
		u64 delayed_refs_bytes = 0;

		qgroup_reserved = num_items * fs_info->nodesize;
		ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
							 enforce_qgroups);
		if (ret)
			return ERR_PTR(ret);

		/*
		 * We want to reserve all the bytes we may need all at once, so
		 * we only do 1 enospc flushing cycle per transaction start. We
		 * accomplish this by simply assuming we'll do 2 x num_items
		 * worth of delayed refs updates in this trans handle, and
		 * refill that amount for whatever is missing in the reserve.
		 */
		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
		if (delayed_refs_rsv->full == 0) {
			delayed_refs_bytes = num_bytes;
			num_bytes <<= 1;
		}

		/*
		 * Do the reservation for the relocation root creation
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += fs_info->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
		if (ret)
			goto reserve_fail;
		if (delayed_refs_bytes) {
			btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
							  delayed_refs_bytes);
			num_bytes -= delayed_refs_bytes;
		}
	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
		   !delayed_refs_rsv->full) {
		/*
		 * Some people call with btrfs_start_transaction(root, 0)
		 * because they can be throttled, but have some other mechanism
		 * for reserving space.  We still want these guys to refill the
		 * delayed block_rsv so just add 1 item's worth of reservation
		 * here.
		 */
		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(fs_info->sb);

	if (may_wait_transaction(fs_info, type))
		wait_current_trans(fs_info);

	do {
		ret = join_transaction(fs_info, type);
		if (ret == -EBUSY) {
			wait_current_trans(fs_info);
			if (unlikely(type == TRANS_ATTACH ||
				     type == TRANS_JOIN_NOSTART))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0)
		goto join_fail;

	cur_trans = fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->root = root;
	refcount_set(&h->use_count, 1);
	h->fs_info = root->fs_info;

	h->type = type;
	h->can_flush_pending_bgs = true;
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
	    may_wait_transaction(fs_info, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info)
		current->journal_info = h;
	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL, true);
}
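
/*
 * Typical caller pattern (an illustrative sketch, not a caller in this
 * file; do_tree_mod() is a hypothetical helper):
 *
 *	trans = btrfs_start_transaction(root, 2);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = do_tree_mod(trans, root);
 *	if (ret)
 *		btrfs_abort_transaction(trans, ret);
 *	return btrfs_end_transaction(trans);
 */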

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items,
					int min_factor)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	int ret;

	/*
	 * We have two callers: unlink and block group removal.  The
	 * former should succeed even if we will temporarily exceed
	 * quota and the latter operates on the extent root so
	 * qgroup enforcement is ignored anyway.
	 */
	trans = start_transaction(root, num_items, TRANS_START,
				  BTRFS_RESERVE_FLUSH_ALL, false);
	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
		return trans;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans))
		return trans;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
	ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
				       num_bytes, min_factor);
	if (ret) {
		btrfs_end_transaction(trans);
		return ERR_PTR(ret);
	}

	trans->block_rsv = &fs_info->trans_block_rsv;
	trans->bytes_reserved = num_bytes;
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, num_bytes, 1);

	return trans;
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
				 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Similar to regular join but it never starts a transaction when none is
 * running or after waiting for the current one to finish.
 */
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction.  But it is possible that the inactive transaction
 * is still in memory, not fully on disk.  If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke btrfs_attach_transaction_barrier().
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH,
				  BTRFS_RESERVE_NO_FLUSH, true);
	if (trans == ERR_PTR(-ENOENT))
		btrfs_wait_for_commit(root->fs_info, 0);

	return trans;
}
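
/*
 * Illustrative sketch of how the barrier variant is used (modeled on a
 * sync-style path; simplified, not a verbatim caller):
 *
 *	trans = btrfs_attach_transaction_barrier(root);
 *	if (IS_ERR(trans)) {
 *		if (PTR_ERR(trans) == -ENOENT)
 *			return 0;	// nothing running, prior commits done
 *		return PTR_ERR(trans);
 *	}
 *	return btrfs_commit_transaction(trans);
 */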

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry(t, &fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction
		 */
		if (!cur_trans) {
			if (transid > fs_info->last_trans_committed)
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry_reverse(t, &fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(cur_trans);
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
	wait_current_trans(fs_info);
}

static int should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (btrfs_check_space_for_delayed_refs(fs_info))
		return 1;

	return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
	    cur_trans->delayed_refs.flushing)
		return 1;

	return should_end_transaction(trans);
}
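
/*
 * Illustrative loop for long-running work (a sketch of how callers such as
 * truncate use this; more_work()/do_some_work() are hypothetical):
 *
 *	while (more_work()) {
 *		do_some_work(trans);
 *		if (btrfs_should_end_transaction(trans)) {
 *			btrfs_end_transaction(trans);
 *			trans = btrfs_start_transaction(root, num_items);
 *			if (IS_ERR(trans))
 *				return PTR_ERR(trans);
 *		}
 *	}
 */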

static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->block_rsv) {
		ASSERT(!trans->bytes_reserved);
		return;
	}

	if (!trans->bytes_reserved)
		return;

	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   int throttle)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int err = 0;

	if (refcount_read(&trans->use_count) > 1) {
		refcount_dec(&trans->use_count);
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	btrfs_create_pending_block_groups(trans);

	btrfs_trans_release_chunk_metadata(trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	cond_wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(info);

	if (trans->aborted ||
	    test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
		wake_up_process(info->transaction_kthread);
		err = -EIO;
	}

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error.  So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to __btrfs_wait_marked_extents() would not know that
		 * writeback for this range started and therefore wouldn't
		 * wait for it to finish - we don't want to commit a
		 * superblock that points to btree nodes/leaves for which
		 * writeback hasn't finished yet (and without errors).
		 * We clean up any entries left in the io tree when committing
		 * the transaction (through extent_io_tree_release()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	return werr;
}
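
/*
 * The write side above is paired with the wait side below; a minimal sketch
 * of the pairing, as done later by btrfs_write_and_wait_transaction():
 *
 *	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
 *	ret2 = btrfs_wait_extents(fs_info, dirty_pages);
 */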

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *dirty_pages)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through extent_io_tree_release()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT, 0, 0, &cached_state);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
			      struct extent_io_tree *dirty_pages)
{
	bool errors = false;
	int err;

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}

int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
	struct btrfs_fs_info *fs_info = log_root->fs_info;
	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
	bool errors = false;
	int err;

	ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if ((mark & EXTENT_DIRTY) &&
	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
		errors = true;

	if ((mark & EXTENT_NEW) &&
	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}

/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
	int ret;
	int ret2;
	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_extents(fs_info, dirty_pages);

	extent_io_tree_release(&trans->transaction->dirty_pages);

	if (ret)
		return ret;
	else if (ret2)
		return ret2;
	else
		return 0;
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans);
	if (ret)
		return ret;

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		return ret;
again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		struct btrfs_root *root;
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
		if (ret)
			return ret;
	}

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);

	/* Update dev-replace pointer once everything is committed */
	fs_info->dev_replace.committed_cursor_left =
		fs_info->dev_replace.cursor_left_last_write_of_item;

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&root->root_list))
		list_add_tail(&root->root_list, &fs_info->dead_roots);
	spin_unlock(&fs_info->trans_lock);
}

/*
 * update all the fs tree roots that were modified in this transaction
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *gang[8];
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			struct btrfs_root *root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
			btrfs_qgroup_free_meta_all_pertrans(root);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(info);
		cond_resched();

		if (btrfs_fs_closing(info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(info)) {
			btrfs_debug(info, "defrag_root cancelled");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}

/*
 * Do all special snapshot related qgroup dirty hack.
 *
 * Will do all needed qgroup inherit and dirty hack like switch commit
 * roots inside one transaction and write all btree into disk, to make
 * qgroups work.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_root *src,
				   struct btrfs_root *parent,
				   struct btrfs_qgroup_inherit *inherit,
				   u64 dst_objectid)
{
	struct btrfs_fs_info *fs_info = src->fs_info;
	int ret;

	/*
	 * Save some performance in the case that qgroups are not
	 * enabled. If this check races with the ioctl, rescan will
	 * kick in anyway.
	 */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/*
	 * Ensure dirty @src will be committed.  Otherwise, after the coming
	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
	 * recorded root will never be updated again, causing an outdated root
	 * item.
	 */
	record_root_in_trans(trans, src, 1);

	/*
	 * We are going to commit transaction, see btrfs_commit_transaction()
	 * comment for the reason we lock tree_log_mutex
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans);
	if (ret)
		goto out;
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto out;

	/* Now qgroups are all updated, we can inherit it to new qgroups */
	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
				   inherit);
	if (ret < 0)
		goto out;

	/*
	 * Now we do a simplified commit transaction, which will:
	 * 1) commit all subvolume and extent trees,
	 *    to ensure all subvolume and extent trees have a valid
	 *    commit_root for the later insert_dir_item() accounting
	 * 2) write all btree blocks onto disk,
	 *    to make sure later btree modifications will be cowed;
	 *    otherwise commit_root could be populated and cause wrong
	 *    qgroup numbers.
	 * In this simplified commit, we don't really care about other trees
	 * like chunk and root tree, as they won't affect qgroup.
	 * And we don't write super to avoid half committed status.
	 */
	ret = commit_cowonly_roots(trans);
	if (ret)
		goto out;
	switch_commit_roots(trans->transaction);
	ret = btrfs_write_and_wait_transaction(trans);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret,
			"Error while writing out transaction for qgroup");

out:
	mutex_unlock(&fs_info->tree_log_mutex);

	/*
	 * Force parent root to be updated, as we recorded it before so its
	 * last_trans == cur_transid.
	 * Otherwise it won't be committed again onto disk after the later
	 * insert_dir_item()
	 */
	if (!ret)
		record_root_in_trans(trans, parent, 1);
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error happens that may affect the commit of the current transaction,
 * we should return the error number.  If an error only affects the creation
 * of the pending snapshots, just return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_pending_snapshot *pending)
{

	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec64 cur_time;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	ASSERT(pending->path);
	path = pending->path;

	ASSERT(pending->root_item);
	new_root_item = pending->root_item;

	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	/*
	 * Make qgroup skip the qgroupid of the new snapshot, as it is
	 * accounted by the later btrfs_qgroup_inherit().
	 */
	btrfs_set_skip_qgroup(trans, objectid);

	btrfs_reloc_pre_snapshot(pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto clear_skip_qgroup;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid,
				      trans->bytes_reserved, 1);
	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root, 0);

	cur_time = current_time(parent_inode);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(BTRFS_I(parent_inode)),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	record_root_in_trans(trans, root, 0);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_set_lock_blocking_write(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(BTRFS_I(parent_inode)), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * Do special qgroup accounting for snapshot, as we do some qgroup
	 * snapshot hack to make snapshotting fast.
	 * To co-operate with that hack, we do the hack again here; otherwise
	 * the snapshot would be greatly slowed down by a subtree qgroup rescan.
	 */
	ret = qgroup_account_snapshot(trans, root, parent_root,
				      pending->inherit, objectid);
	if (ret < 0)
		goto fail;

	ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
				    dentry->d_name.len, BTRFS_I(parent_inode),
				    &key, BTRFS_FT_DIR, index);
	/* We checked the name at the beginning, so these are impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime =
		current_time(parent_inode);
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, new_uuid.b, BTRFS_UUID_KEY_SUBVOL,
				  objectid);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, ret);
			goto fail;
		}
	}

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
no_free_objectid:
	kfree(new_root_item);
	pending->root_item = NULL;
	btrfs_free_path(path);
	pending->path = NULL;

	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, pending);
		if (ret)
			break;
	}
	return ret;
}

static void update_super_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = fs_info->super_copy;

	root_item = &fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(fs_info, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
		super->uuid_tree_generation = root_item->generation;
}
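
/*
 * Ordering note (a summary of the commit sequence, not new behavior): in
 * btrfs_commit_transaction() this runs after switch_commit_roots(), so the
 * bytenr/generation/level copied above already describe the new commit
 * roots; fs_info->super_copy is then persisted by the super block writes at
 * the end of the commit.
 */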

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = (trans->state >= TRANS_STATE_COMMIT_START);
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
					    struct btrfs_transaction *trans)
{
	wait_event(fs_info->transaction_blocked_wait,
		   trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(
					struct btrfs_fs_info *fs_info,
					struct btrfs_transaction *trans)
{
	wait_event(fs_info->transaction_wait,
		   trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   int wait_for_unblock)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->newtrans = btrfs_join_transaction(trans->root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	refcount_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_release(fs_info->sb, SB_FREEZE_FS);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
	else
		wait_current_trans_commit_start(fs_info, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}
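
/*
 * Illustrative caller sketch for the async commit (modeled on the snapshot
 * creation ioctl path; simplified, not a verbatim caller):
 *
 *	trans = btrfs_start_transaction(root, 0);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	...queue pending snapshot on trans->transaction...
 *	ret = btrfs_commit_transaction_async(trans, 1);
 *	// on return the commit has at least reached COMMIT_START (or
 *	// UNBLOCKED, since wait_for_unblock was passed)
 */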

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   int wait_for_unblock)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->newtrans = btrfs_join_transaction(trans->root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);

		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	refcount_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_release(fs_info->sb, SB_FREEZE_FS);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
	else
		wait_current_trans_commit_start(fs_info, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}
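
/*
 * Illustrative usage (an editor's sketch, not the actual caller): on
 * failure the handle has not been ended, so a caller can fall back to a
 * synchronous commit:
 *
 *	ret = btrfs_commit_transaction_async(trans, 1);
 *	if (ret)
 *		ret = btrfs_commit_transaction(trans);
 *
 * With wait_for_unblock set, the call returns once the commit reaches
 * TRANS_STATE_UNBLOCKED; otherwise it returns at TRANS_STATE_COMMIT_START.
 */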

static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(refcount_read(&trans->use_count) > 1);

	btrfs_abort_transaction(trans, err);

	spin_lock(&fs_info->trans_lock);

	/*
	 * If the transaction is removed from the list, it means this
	 * transaction has been committed successfully, so it is impossible
	 * to call the cleanup function.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	list_del_init(&cur_trans->list);
	if (cur_trans == fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&fs_info->trans_lock);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, fs_info);

	spin_lock(&fs_info->trans_lock);
	if (cur_trans == fs_info->running_transaction)
		fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(trans->root);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	btrfs_scrub_cancel(fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

/*
 * Release reserved delayed ref space of all pending block groups of the
 * transaction and remove them from the list
 */
static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *block_group, *tmp;

	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		list_del_init(&block_group->bg_list);
	}
}

static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	/*
	 * We use writeback_inodes_sb here because if we used
	 * btrfs_start_delalloc_roots we would deadlock with fs freeze.
	 * We are currently holding the fs freeze lock; if we did an async
	 * flush we would end up in btrfs_join_transaction() and deadlock,
	 * because that join needs to wait for the fs freeze lock. By
	 * flushing directly we benefit from already being in a transaction,
	 * so our join_transaction doesn't have to re-take the fs freeze
	 * lock.
	 */
	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
		writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
	} else {
		struct btrfs_pending_snapshot *pending;
		struct list_head *head = &trans->transaction->pending_snapshots;

		/*
		 * Flush delalloc for any root that is going to be snapshotted.
		 * This is done to avoid a corrupted version of files, in the
		 * snapshots, that had both buffered and direct IO writes (even
		 * if they were done sequentially) due to an unordered update of
		 * the inode's size on disk.
		 */
		list_for_each_entry(pending, head, list) {
			int ret;

			ret = btrfs_start_delalloc_snapshot(pending->root);
			if (ret)
				return ret;
		}
	}
	return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
	} else {
		struct btrfs_pending_snapshot *pending;
		struct list_head *head = &trans->transaction->pending_snapshots;

		/*
		 * Wait for any delalloc that we started previously for the
		 * roots that are going to be snapshotted. This is to avoid a
		 * corrupted version of files in the snapshots that had both
		 * buffered and direct IO writes (even if they were done
		 * sequentially).
		 */
		list_for_each_entry(pending, head, list)
			btrfs_wait_ordered_extents(pending->root,
						   U64_MAX, 0, U64_MAX);
	}
}
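
/*
 * The two helpers above are meant to be used as a pair around the other
 * pre-commit work, as btrfs_commit_transaction() does below; a minimal
 * sketch:
 *
 *	ret = btrfs_start_delalloc_flush(trans);
 *	if (ret)
 *		goto cleanup_transaction;
 *	... run delayed items, wait for external writers ...
 *	btrfs_wait_delalloc_flush(trans);
 *
 * Both check the same FLUSHONCOMMIT vs. pending-snapshots condition, so
 * whatever writeback was started is also the writeback waited for.
 */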

int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	int ret;

	ASSERT(refcount_read(&trans->use_count) == 1);

	/* Stop the commit early if ->aborted is set */
	if (unlikely(READ_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans);
		return ret;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	/*
	 * Make a pass through all the delayed refs we have so far; any
	 * running procs may add more while we are here.
	 */
	ret = btrfs_run_delayed_refs(trans, 0);
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;
	smp_wmb();

	btrfs_create_pending_block_groups(trans);

	ret = btrfs_run_delayed_refs(trans, 0);
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}

	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
		int run_it = 0;

		/*
		 * This mutex is also taken before trying to set block groups
		 * readonly. We need to make sure that nobody has set a block
		 * group readonly after extents from that block group have
		 * been allocated for cache files. btrfs_set_block_group_ro
		 * will wait for the transaction to commit if it finds
		 * BTRFS_TRANS_DIRTY_BG_RUN set.
		 *
		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
		 * only one process starts all the block group IO. It wouldn't
		 * hurt to have more than one go through, but there's no
		 * real advantage to it either.
		 */
		mutex_lock(&fs_info->ro_block_group_mutex);
		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
				      &cur_trans->flags))
			run_it = 1;
		mutex_unlock(&fs_info->ro_block_group_mutex);

		if (run_it) {
			ret = btrfs_start_dirty_block_groups(trans);
			if (ret) {
				btrfs_end_transaction(trans);
				return ret;
			}
		}
	}
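
	/*
	 * If this transaction already moved past TRANS_STATE_COMMIT_START,
	 * another task owns the commit: take an extra reference, drop our
	 * handle and simply wait for that committer to finish (the "wait
	 * for current transaction to be committed by others" path in the
	 * state diagram at the top of this file).
	 */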
	spin_lock(&fs_info->trans_lock);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		spin_unlock(&fs_info->trans_lock);
		refcount_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans);

		wait_for_commit(cur_trans);

		if (unlikely(cur_trans->aborted))
			ret = cur_trans->aborted;

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state != TRANS_STATE_COMPLETED) {
			refcount_inc(&prev_trans->use_count);
			spin_unlock(&fs_info->trans_lock);

			wait_for_commit(prev_trans);
			ret = prev_trans->aborted;

			btrfs_put_transaction(prev_trans);
			if (ret)
				goto cleanup_transaction;
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
	} else {
		spin_unlock(&fs_info->trans_lock);
		/*
		 * The previous transaction was aborted and was already removed
		 * from the list of transactions at fs_info->trans_list. So we
		 * abort to prevent writing a new superblock that reflects a
		 * corrupt state (pointing to trees with unwritten nodes/leaves).
		 */
		if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
			ret = -EROFS;
			goto cleanup_transaction;
		}
	}

	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(trans);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto cleanup_transaction;

	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* Some pending work might have been added after the previous flush. */
	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(trans);

	btrfs_scrub_pause(fs_info);
	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction. We could have started a join before setting
	 * COMMIT_DOING, so make sure to wait for num_writers to drop to 1
	 * again.
	 */
	spin_lock(&fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(READ_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans);
	if (ret) {
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with. Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret) {
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(fs_info);

	WARN_ON(cur_trans != trans->transaction);

	/*
	 * commit_fs_roots() and commit_cowonly_roots() below are responsible
	 * for getting the various roots consistent with each other. Every
	 * pointer in the tree of tree roots has to point to the most up to
	 * date root for every subvolume and other tree. So, we have to keep
	 * the tree logging code from jumping in and changing any of the
	 * trees.
	 *
	 * At this point in the commit, there can't be any tree-log writers,
	 * but a little lower down we drop the trans mutex and let new people
	 * in. By holding the tree_log_mutex from now until after the super
	 * is written, we avoid races with the tree-log code.
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans);
	if (ret) {
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since the transaction is done, we can apply the pending changes
	 * before the next transaction.
	 */
	btrfs_apply_pending_changes(fs_info);

	/*
	 * commit_fs_roots() gets rid of all the tree log roots, so it is now
	 * safe to free the log root tree.
	 */
	btrfs_free_log_root_tree(trans, fs_info);

	/*
	 * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
	 * new delayed refs. Must handle them or qgroup can be wrong.
	 */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since the fs roots are all committed, we can get quite accurate
	 * new_roots, so let's do the quota accounting.
	 */
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0) {
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}
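
	/*
	 * Commit the roots of the remaining non-subvolume trees (the
	 * "cow-only" trees: extent tree, device tree, csum tree, ...) so
	 * that everything the new super block will point at is consistent.
	 */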
	ret = commit_cowonly_roots(trans);
	if (ret) {
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, so check it.
	 */
	if (unlikely(READ_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	btrfs_prepare_extent_commit(fs_info);

	cur_trans = fs_info->running_transaction;

	btrfs_set_root_node(&fs_info->tree_root->root_item,
			    fs_info->tree_root->node);
	list_add_tail(&fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&fs_info->chunk_root->root_item,
			    fs_info->chunk_root->node);
	list_add_tail(&fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	switch_commit_roots(cur_trans);

	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));
	update_super_roots(fs_info);

	btrfs_set_super_log_root(fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_copy));

	btrfs_commit_device_sizes(cur_trans);

	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);

	btrfs_trans_release_chunk_metadata(trans);

	spin_lock(&fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);
	mutex_unlock(&fs_info->reloc_mutex);

	wake_up(&fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Error while writing out transaction");
		mutex_unlock(&fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_all_supers(fs_info, 0);
	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&fs_info->tree_log_mutex);
	if (ret)
		goto scrub_continue;

	btrfs_finish_extent_commit(trans);

	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
		btrfs_clear_space_info_full(fs_info);

	fs_info->last_trans_committed = cur_trans->transid;
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
	clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);

	spin_lock(&fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);

	trace_btrfs_transaction_commit(trans->root);

	btrfs_scrub_continue(fs_info);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	return ret;

scrub_continue:
	btrfs_scrub_continue(fs_info);
cleanup_transaction:
	btrfs_trans_release_metadata(trans);
	btrfs_cleanup_pending_block_groups(trans);
	btrfs_trans_release_chunk_metadata(trans);
	trans->block_rsv = NULL;
	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, ret);

	return ret;
}

/*
 * Return values:
 * 0 - there were no more dead roots at the time of the call (this is also
 *     the result when dropping the snapshot failed)
 * 1 - there are more dead roots to be processed, call me again
 *
 * A return of 1 means there are certainly more snapshots to delete, but if
 * a new one shows up during processing we may still return 0. We don't
 * mind, because btrfs_commit_super will poke the cleaner thread and it will
 * process it a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
				struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) <
			BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, NULL, 1, 0);

	return (ret < 0) ? 0 : 1;
}
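
/*
 * Illustrative caller loop (an editor's sketch of how a cleaner thread
 * might consume the contract above; the real caller lives in disk-io.c):
 *
 *	int again;
 *
 *	do {
 *		again = btrfs_clean_one_deleted_snapshot(fs_info->tree_root);
 *	} while (again == 1);
 *
 * Since errors are folded into the 0 return, the loop simply stops and
 * relies on being poked again later.
 */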

void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
{
	unsigned long prev;
	unsigned long bit;

	prev = xchg(&fs_info->pending_changes, 0);
	if (!prev)
		return;

	bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_COMMIT;
	if (prev & bit)
		btrfs_debug(fs_info, "pending commit done");
	prev &= ~bit;

	if (prev)
		btrfs_warn(fs_info,
			"unknown pending changes left 0x%lx, ignoring", prev);
}
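
/*
 * Illustrative producer side (an editor's sketch; the real setters live
 * outside this file): a pending change is requested by atomically setting
 * a bit and letting the next commit apply it:
 *
 *	set_bit(BTRFS_PENDING_COMMIT, &fs_info->pending_changes);
 *
 * The xchg() in btrfs_apply_pending_changes() then consumes all requested
 * bits in one shot while the transaction is being committed.
 */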