/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"

#define BTRFS_ROOT_TRANS_TAG 0

static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_RUNNING]           = 0U,
        [TRANS_STATE_BLOCKED]           = (__TRANS_USERSPACE |
                                           __TRANS_START),
        [TRANS_STATE_COMMIT_START]      = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH),
        [TRANS_STATE_COMMIT_DOING]      = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN),
        [TRANS_STATE_UNBLOCKED]         = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK),
        [TRANS_STATE_COMPLETED]         = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK),
};

void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(refcount_read(&transaction->use_count) == 0);
        if (refcount_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
                if (transaction->delayed_refs.pending_csums)
                        btrfs_err(transaction->fs_info,
                                  "pending csums is %llu",
                                  transaction->delayed_refs.pending_csums);
                while (!list_empty(&transaction->pending_chunks)) {
                        struct extent_map *em;

                        em = list_first_entry(&transaction->pending_chunks,
                                              struct extent_map, list);
                        list_del_init(&em->list);
                        free_extent_map(em);
                }
                /*
                 * If any block groups are found in ->deleted_bgs then it's
                 * because the transaction was aborted and a commit did not
                 * happen (things failed before writing the new superblock
                 * and calling btrfs_finish_extent_commit()), so we can not
                 * discard the physical locations of the block groups.
                 */
                while (!list_empty(&transaction->deleted_bgs)) {
                        struct btrfs_block_group_cache *cache;

                        cache = list_first_entry(&transaction->deleted_bgs,
                                                 struct btrfs_block_group_cache,
                                                 bg_list);
                        list_del_init(&cache->bg_list);
                        btrfs_put_block_group_trimming(cache);
                        btrfs_put_block_group(cache);
                }
                kfree(transaction);
        }
}
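
/*
 * Reference-counting sketch (informational): join_transaction() starts a
 * transaction with use_count == 2 -- one reference for the trans handle and
 * one that lives on until the commit finishes -- so a typical lifetime
 * pairs up as:
 *
 *      join_transaction()                      use_count 0 -> 2
 *      __btrfs_end_transaction()
 *              -> btrfs_put_transaction()      drops the handle's reference
 *      commit (or cleanup) path
 *              -> btrfs_put_transaction()      drops the final reference
 *
 * The assertions above rely on the final put happening only after the
 * transaction has been unlinked from fs_info->trans_list.
 */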

static void clear_btree_io_tree(struct extent_io_tree *tree)
{
        spin_lock(&tree->lock);
        /*
         * Do a single barrier for the waitqueue_active check here; the state
         * of the waitqueue should not change once clear_btree_io_tree is
         * called.
         */
        smp_mb();
        while (!RB_EMPTY_ROOT(&tree->state)) {
                struct rb_node *node;
                struct extent_state *state;

                node = rb_first(&tree->state);
                state = rb_entry(node, struct extent_state, rb_node);
                rb_erase(&state->rb_node, &tree->state);
                RB_CLEAR_NODE(&state->rb_node);
                /*
                 * btree io trees aren't supposed to have tasks waiting for
                 * changes in the flags of extent states ever.
                 */
                ASSERT(!waitqueue_active(&state->wq));
                free_extent_state(state);

                cond_resched_lock(&tree->lock);
        }
        spin_unlock(&tree->lock);
}

static noinline void switch_commit_roots(struct btrfs_transaction *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root, *tmp;

        down_write(&fs_info->commit_root_sem);
        list_for_each_entry_safe(root, tmp, &trans->switch_commits,
                                 dirty_list) {
                list_del_init(&root->dirty_list);
                free_extent_buffer(root->commit_root);
                root->commit_root = btrfs_root_node(root);
                if (is_fstree(root->objectid))
                        btrfs_unpin_free_ino(root);
                clear_btree_io_tree(&root->dirty_log_pages);
        }

        /* We can free old roots now. */
        spin_lock(&trans->dropped_roots_lock);
        while (!list_empty(&trans->dropped_roots)) {
                root = list_first_entry(&trans->dropped_roots,
                                        struct btrfs_root, root_list);
                list_del_init(&root->root_list);
                spin_unlock(&trans->dropped_roots_lock);
                btrfs_drop_and_free_fs_root(fs_info, root);
                spin_lock(&trans->dropped_roots_lock);
        }
        spin_unlock(&trans->dropped_roots_lock);
        up_write(&fs_info->commit_root_sem);
}

static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
                                          unsigned int type)
{
        atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
        return atomic_read(&trans->num_extwriters);
}
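
/*
 * Return contract of join_transaction() (informational, derived from the
 * body below):
 *
 *      0        joined, or created, fs_info->running_transaction
 *      -EROFS   the filesystem is in an error state
 *      -EBUSY   the running transaction's state blocks handles of @type
 *      -ENOENT  @type is TRANS_ATTACH and no transaction is running
 *      -ENOMEM  allocating a new transaction failed
 *
 * It may also return a prior ->aborted error of the running transaction.
 */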

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
                                     unsigned int type)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&fs_info->trans_lock);
loop:
        /* The file system has been taken offline. No new transactions. */
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }

        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                if (btrfs_blocked_trans_types[cur_trans->state] & type) {
                        spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
                refcount_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                extwriter_counter_inc(cur_trans, type);
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&fs_info->trans_lock);

        /*
         * If we are ATTACH, we just want to catch the current transaction,
         * and commit it. If there is no transaction, just return ENOENT.
         */
        if (type == TRANS_ATTACH)
                return -ENOENT;

        /*
         * JOIN_NOLOCK only happens during the transaction commit, so
         * it is impossible that ->running_transaction is NULL
         */
        BUG_ON(type == TRANS_JOIN_NOLOCK);

        cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&fs_info->trans_lock);
        if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked. Make sure
                 * to redo the checks above
                 */
                kfree(cur_trans);
                goto loop;
        } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                kfree(cur_trans);
                return -EROFS;
        }

        cur_trans->fs_info = fs_info;
        atomic_set(&cur_trans->num_writers, 1);
        extwriter_counter_init(cur_trans, type);
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        init_waitqueue_head(&cur_trans->pending_wait);
        cur_trans->state = TRANS_STATE_RUNNING;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        refcount_set(&cur_trans->use_count, 2);
        atomic_set(&cur_trans->pending_ordered, 0);
        cur_trans->flags = 0;
        cur_trans->start_time = get_seconds();

        memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

        cur_trans->delayed_refs.href_root = RB_ROOT;
        cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
        atomic_set(&cur_trans->delayed_refs.num_entries, 0);

        /*
         * although the tree mod log is per file system and not per transaction,
         * the log must never go across transaction boundaries.
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list))
                WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
                WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
        atomic64_set(&fs_info->tree_mod_seq, 0);

        spin_lock_init(&cur_trans->delayed_refs.lock);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        INIT_LIST_HEAD(&cur_trans->pending_chunks);
        INIT_LIST_HEAD(&cur_trans->switch_commits);
        INIT_LIST_HEAD(&cur_trans->dirty_bgs);
        INIT_LIST_HEAD(&cur_trans->io_bgs);
        INIT_LIST_HEAD(&cur_trans->dropped_roots);
        mutex_init(&cur_trans->cache_write_mutex);
        cur_trans->num_dirty_bgs = 0;
        spin_lock_init(&cur_trans->dirty_bgs_lock);
        INIT_LIST_HEAD(&cur_trans->deleted_bgs);
        spin_lock_init(&cur_trans->dropped_roots_lock);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                            fs_info->btree_inode);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
        spin_unlock(&fs_info->trans_lock);

        return 0;
}
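
/*
 * Barrier pairing (informational): the smp_wmb() in record_root_in_trans()
 * publishes the IN_TRANS_SETUP bit before the root->last_trans update, and
 * it pairs with the smp_rmb() in btrfs_record_root_in_trans(), so lockless
 * readers either see IN_TRANS_SETUP set or a fully published last_trans.
 */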

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction. This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                int force)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
            root->last_trans < trans->transid) || force) {
                WARN_ON(root == fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);

                /*
                 * see below for IN_TRANS_SETUP usage rules; we have the
                 * reloc mutex held now, so there is only one writer in this
                 * function
                 */
                set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

                /* make sure readers find IN_TRANS_SETUP before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid && !force) {
                        spin_unlock(&fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky. We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c. The solution used here is to flag ourselves
                 * with root IN_TRANS_SETUP. When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock. smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_mb__before_atomic();
                clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
        }
        return 0;
}

void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_transaction *cur_trans = trans->transaction;

        /* Add ourselves to the transaction dropped list */
        spin_lock(&cur_trans->dropped_roots_lock);
        list_add_tail(&root->root_list, &cur_trans->dropped_roots);
        spin_unlock(&cur_trans->dropped_roots_lock);

        /* Make sure we don't try to update the root at commit time */
        spin_lock(&fs_info->fs_roots_radix_lock);
        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                             (unsigned long)root->root_key.objectid,
                             BTRFS_ROOT_TRANS_TAG);
        spin_unlock(&fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                return 0;

        /*
         * see record_root_in_trans for comments about IN_TRANS_SETUP usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
                return 0;

        mutex_lock(&fs_info->reloc_mutex);
        record_root_in_trans(trans, root, 0);
        mutex_unlock(&fs_info->reloc_mutex);

        return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
        return (trans->state >= TRANS_STATE_BLOCKED &&
                trans->state < TRANS_STATE_UNBLOCKED &&
                !trans->aborted);
}
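
/*
 * Informational note: an aborted transaction does not count as blocked, so
 * waiters in wait_current_trans() below finish either when the state
 * reaches TRANS_STATE_UNBLOCKED or when ->aborted becomes set.
 */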

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&fs_info->trans_lock);
        cur_trans = fs_info->running_transaction;
        if (cur_trans && is_transaction_blocked(cur_trans)) {
                refcount_inc(&cur_trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                wait_event(fs_info->transaction_wait,
                           cur_trans->state >= TRANS_STATE_UNBLOCKED ||
                           cur_trans->aborted);
                btrfs_put_transaction(cur_trans);
        } else {
                spin_unlock(&fs_info->trans_lock);
        }
}

static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                return 0;

        if (type == TRANS_USERSPACE)
                return 1;

        if (type == TRANS_START &&
            !atomic_read(&fs_info->open_ioctl_trans))
                return 1;

        return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!fs_info->reloc_ctl ||
            !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
            root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
            root->reloc_root)
                return false;

        return true;
}
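
/*
 * Minimal usage sketch (informational): callers reserve space for a known
 * number of tree items up front and release everything by ending the
 * handle:
 *
 *      trans = btrfs_start_transaction(root, 2);
 *      if (IS_ERR(trans))
 *              return PTR_ERR(trans);
 *      ... modify up to two tree items ...
 *      return btrfs_end_transaction(trans);
 *
 * start_transaction() below is the common implementation behind all the
 * btrfs_*_transaction() wrappers that follow.
 */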

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
                  unsigned int type, enum btrfs_reserve_flush_enum flush,
                  bool enforce_qgroups)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        u64 qgroup_reserved = 0;
        bool reloc_reserved = false;
        int ret;

        /* Send isn't supposed to start transactions. */
        ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                return ERR_PTR(-EROFS);

        if (current->journal_info) {
                WARN_ON(type & TRANS_EXTWRITERS);
                h = current->journal_info;
                h->use_count++;
                WARN_ON(h->use_count > 2);
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items && root != fs_info->chunk_root) {
                qgroup_reserved = num_items * fs_info->nodesize;
                ret = btrfs_qgroup_reserve_meta(root, qgroup_reserved,
                                                enforce_qgroups);
                if (ret)
                        return ERR_PTR(ret);

                num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
                /*
                 * Do the reservation for the relocation root creation
                 */
                if (need_reserve_reloc_root(root)) {
                        num_bytes += fs_info->nodesize;
                        reloc_reserved = true;
                }

                ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
                                          num_bytes, flush);
                if (ret)
                        goto reserve_fail;
        }
again:
        h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h) {
                ret = -ENOMEM;
                goto alloc_fail;
        }

        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
         * waiting on this guy, so we don't need to do the sb_start_intwrite
         * because we're already holding a ref. We need this because we could
         * have raced in and did an fsync() on a file which can kick a commit
         * and then we deadlock with somebody doing a freeze.
         *
         * If we are ATTACH, it means we just want to catch the current
         * transaction and commit it, so we needn't do sb_start_intwrite().
         */
        if (type & __TRANS_FREEZABLE)
                sb_start_intwrite(fs_info->sb);

        if (may_wait_transaction(fs_info, type))
                wait_current_trans(fs_info);

        do {
                ret = join_transaction(fs_info, type);
                if (ret == -EBUSY) {
                        wait_current_trans(fs_info);
                        if (unlikely(type == TRANS_ATTACH))
                                ret = -ENOENT;
                }
        } while (ret == -EBUSY);

        if (ret < 0)
                goto join_fail;

        cur_trans = fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->root = root;
        h->use_count = 1;
        h->fs_info = root->fs_info;

        h->type = type;
        h->can_flush_pending_bgs = true;
        INIT_LIST_HEAD(&h->new_bgs);

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED &&
            may_wait_transaction(fs_info, type)) {
                current->journal_info = h;
                btrfs_commit_transaction(h);
                goto again;
        }

        if (num_bytes) {
                trace_btrfs_space_reservation(fs_info, "transaction",
                                              h->transid, num_bytes, 1);
                h->block_rsv = &fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
                h->reloc_reserved = reloc_reserved;
        }

got_it:
        btrfs_record_root_in_trans(h, root);

        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;

join_fail:
        if (type & __TRANS_FREEZABLE)
                sb_end_intwrite(fs_info->sb);
        kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
        if (num_bytes)
                btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
                                        num_bytes);
reserve_fail:
        btrfs_qgroup_free_meta(root, qgroup_reserved);
        return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   unsigned int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_ALL, true);
}
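
/*
 * Informational note: the remaining wrappers differ mainly in the handle
 * type and the reservation flush mode they pass down: FLUSH_ALL for the
 * start variants, FLUSH_LIMIT for the lflush variant, and NO_FLUSH for the
 * join/attach variants, which reserve no items.
 */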

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
                                        struct btrfs_root *root,
                                        unsigned int num_items,
                                        int min_factor)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_trans_handle *trans;
        u64 num_bytes;
        int ret;

        /*
         * We have two callers: unlink and block group removal. The
         * former should succeed even if we will temporarily exceed
         * quota and the latter operates on the extent root so
         * qgroup enforcement is ignored anyway.
         */
        trans = start_transaction(root, num_items, TRANS_START,
                                  BTRFS_RESERVE_FLUSH_ALL, false);
        if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
                return trans;

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans))
                return trans;

        num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
        ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
                                       num_bytes, min_factor);
        if (ret) {
                btrfs_end_transaction(trans);
                return ERR_PTR(ret);
        }

        trans->block_rsv = &fs_info->trans_block_rsv;
        trans->bytes_reserved = num_bytes;
        trace_btrfs_space_reservation(fs_info, "transaction",
                                      trans->transid, num_bytes, 1);

        return trans;
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
                                        struct btrfs_root *root,
                                        unsigned int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_LIMIT, true);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
                                 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_USERSPACE,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in memory, not fully on disk. If you need to be sure there is
 * no inactive transaction in the fs when -ENOENT is returned, you should
 * invoke btrfs_attach_transaction_barrier().
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_ATTACH,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is that this one
 * will also wait for any inactive transaction until it has fully
 * completed.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;

        trans = start_transaction(root, 0, TRANS_ATTACH,
                                  BTRFS_RESERVE_NO_FLUSH, true);
        if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
                btrfs_wait_for_commit(root->fs_info, 0);

        return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret = 0;

        if (transid) {
                if (transid <= fs_info->last_trans_committed)
                        goto out;

                /* find specified transaction */
                spin_lock(&fs_info->trans_lock);
                list_for_each_entry(t, &fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                refcount_inc(&cur_trans->use_count);
                                ret = 0;
                                break;
                        }
                        if (t->transid > transid) {
                                ret = 0;
                                break;
                        }
                }
                spin_unlock(&fs_info->trans_lock);

                /*
                 * The specified transaction doesn't exist, or we
                 * raced with btrfs_commit_transaction
                 */
                if (!cur_trans) {
                        if (transid > fs_info->last_trans_committed)
                                ret = -EINVAL;
                        goto out;
                }
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&fs_info->trans_lock);
                list_for_each_entry_reverse(t, &fs_info->trans_list,
                                            list) {
                        if (t->state >= TRANS_STATE_COMMIT_START) {
                                if (t->state == TRANS_STATE_COMPLETED)
                                        break;
                                cur_trans = t;
                                refcount_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(cur_trans);
        btrfs_put_transaction(cur_trans);
out:
        return ret;
}

void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
        if (!atomic_read(&fs_info->open_ioctl_trans))
                wait_current_trans(fs_info);
}

static int should_end_transaction(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;

        if (btrfs_check_space_for_delayed_refs(trans, fs_info))
                return 1;

        return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int updates;
        int err;

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED ||
            cur_trans->delayed_refs.flushing)
                return 1;

        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates) {
                err = btrfs_run_delayed_refs(trans, fs_info, updates * 2);
                if (err) /* Error code will also eval true */
                        return err;
        }

        return should_end_transaction(trans);
}
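
/*
 * Informational note: for a nested handle (use_count > 1, see the re-entry
 * path in start_transaction()), __btrfs_end_transaction() below only
 * restores the caller's block_rsv and drops use_count; the real teardown
 * happens when the outermost handle is ended.
 */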

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                                   int throttle)
{
        struct btrfs_fs_info *info = trans->fs_info;
        struct btrfs_transaction *cur_trans = trans->transaction;
        u64 transid = trans->transid;
        unsigned long cur = trans->delayed_ref_updates;
        int lock = (trans->type != TRANS_JOIN_NOLOCK);
        int err = 0;
        int must_run_delayed_refs = 0;

        if (trans->use_count > 1) {
                trans->use_count--;
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        btrfs_trans_release_metadata(trans, info);
        trans->block_rsv = NULL;

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, info);

        trans->delayed_ref_updates = 0;
        if (!trans->sync) {
                must_run_delayed_refs =
                        btrfs_should_throttle_delayed_refs(trans, info);
                cur = max_t(unsigned long, cur, 32);

                /*
                 * don't make the caller wait if they are from a NOLOCK
                 * or ATTACH transaction, it will deadlock with commit
                 */
                if (must_run_delayed_refs == 1 &&
                    (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
                        must_run_delayed_refs = 2;
        }

        btrfs_trans_release_metadata(trans, info);
        trans->block_rsv = NULL;

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, info);

        btrfs_trans_release_chunk_metadata(trans);

        if (lock && !atomic_read(&info->open_ioctl_trans) &&
            should_end_transaction(trans) &&
            READ_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
                spin_lock(&info->trans_lock);
                if (cur_trans->state == TRANS_STATE_RUNNING)
                        cur_trans->state = TRANS_STATE_BLOCKED;
                spin_unlock(&info->trans_lock);
        }

        if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
                if (throttle)
                        return btrfs_commit_transaction(trans);
                else
                        wake_up_process(info->transaction_kthread);
        }

        if (trans->type & __TRANS_FREEZABLE)
                sb_end_intwrite(info->sb);

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);
        extwriter_counter_dec(cur_trans, trans->type);

        /*
         * Make sure counter is updated before we wake up waiters.
         */
        smp_mb();
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        btrfs_put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        if (throttle)
                btrfs_run_delayed_iputs(info);

        if (trans->aborted ||
            test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
                wake_up_process(info->transaction_kthread);
                err = -EIO;
        }

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        if (must_run_delayed_refs) {
                btrfs_async_run_delayed_refs(info, cur, transid,
                                             must_run_delayed_refs == 1);
        }
        return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
        return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
        return __btrfs_end_transaction(trans, 1);
}
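
/*
 * Informational note: for the transaction's dirty_pages tree the @mark
 * used below is EXTENT_DIRTY; tree-log commits also use EXTENT_NEW
 * (compare btrfs_wait_tree_log_extents() further down).
 */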
This is used to make sure all of 940 * those extents are sent to disk but does not wait on them 941 */ 942 int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info, 943 struct extent_io_tree *dirty_pages, int mark) 944 { 945 int err = 0; 946 int werr = 0; 947 struct address_space *mapping = fs_info->btree_inode->i_mapping; 948 struct extent_state *cached_state = NULL; 949 u64 start = 0; 950 u64 end; 951 952 atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers); 953 while (!find_first_extent_bit(dirty_pages, start, &start, &end, 954 mark, &cached_state)) { 955 bool wait_writeback = false; 956 957 err = convert_extent_bit(dirty_pages, start, end, 958 EXTENT_NEED_WAIT, 959 mark, &cached_state); 960 /* 961 * convert_extent_bit can return -ENOMEM, which is most of the 962 * time a temporary error. So when it happens, ignore the error 963 * and wait for writeback of this range to finish - because we 964 * failed to set the bit EXTENT_NEED_WAIT for the range, a call 965 * to __btrfs_wait_marked_extents() would not know that 966 * writeback for this range started and therefore wouldn't 967 * wait for it to finish - we don't want to commit a 968 * superblock that points to btree nodes/leafs for which 969 * writeback hasn't finished yet (and without errors). 970 * We cleanup any entries left in the io tree when committing 971 * the transaction (through clear_btree_io_tree()). 972 */ 973 if (err == -ENOMEM) { 974 err = 0; 975 wait_writeback = true; 976 } 977 if (!err) 978 err = filemap_fdatawrite_range(mapping, start, end); 979 if (err) 980 werr = err; 981 else if (wait_writeback) 982 werr = filemap_fdatawait_range(mapping, start, end); 983 free_extent_state(cached_state); 984 cached_state = NULL; 985 cond_resched(); 986 start = end + 1; 987 } 988 atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers); 989 return werr; 990 } 991 992 /* 993 * when btree blocks are allocated, they have some corresponding bits set for 994 * them in one of two extent_io trees. This is used to make sure all of 995 * those extents are on disk for transaction or log commit. We wait 996 * on all the pages and clear them from the dirty pages state tree 997 */ 998 static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info, 999 struct extent_io_tree *dirty_pages) 1000 { 1001 int err = 0; 1002 int werr = 0; 1003 struct address_space *mapping = fs_info->btree_inode->i_mapping; 1004 struct extent_state *cached_state = NULL; 1005 u64 start = 0; 1006 u64 end; 1007 1008 while (!find_first_extent_bit(dirty_pages, start, &start, &end, 1009 EXTENT_NEED_WAIT, &cached_state)) { 1010 /* 1011 * Ignore -ENOMEM errors returned by clear_extent_bit(). 1012 * When committing the transaction, we'll remove any entries 1013 * left in the io tree. For a log commit, we don't remove them 1014 * after committing the log because the tree can be accessed 1015 * concurrently - we do it only at transaction commit time when 1016 * it's safe to do it (through clear_btree_io_tree()). 

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit. We wait
 * on all the pages and clear them from the dirty pages state tree
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
                                       struct extent_io_tree *dirty_pages)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT, &cached_state)) {
                /*
                 * Ignore -ENOMEM errors returned by clear_extent_bit().
                 * When committing the transaction, we'll remove any entries
                 * left in the io tree. For a log commit, we don't remove them
                 * after committing the log because the tree can be accessed
                 * concurrently - we do it only at transaction commit time when
                 * it's safe to do it (through clear_btree_io_tree()).
                 */
                err = clear_extent_bit(dirty_pages, start, end,
                                       EXTENT_NEED_WAIT,
                                       0, 0, &cached_state, GFP_NOFS);
                if (err == -ENOMEM)
                        err = 0;
                if (!err)
                        err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                free_extent_state(cached_state);
                cached_state = NULL;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
                       struct extent_io_tree *dirty_pages)
{
        bool errors = false;
        int err;

        err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
        if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
                errors = true;

        if (errors && !err)
                err = -EIO;
        return err;
}

int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
        struct btrfs_fs_info *fs_info = log_root->fs_info;
        struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
        bool errors = false;
        int err;

        ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

        err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
        if ((mark & EXTENT_DIRTY) &&
            test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
                errors = true;

        if ((mark & EXTENT_NEW) &&
            test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
                errors = true;

        if (errors && !err)
                err = -EIO;
        return err;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
static int btrfs_write_and_wait_marked_extents(struct btrfs_fs_info *fs_info,
                                struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int ret2;
        struct blk_plug plug;

        blk_start_plug(&plug);
        ret = btrfs_write_marked_extents(fs_info, dirty_pages, mark);
        blk_finish_plug(&plug);
        ret2 = btrfs_wait_extents(fs_info, dirty_pages);

        if (ret)
                return ret;
        if (ret2)
                return ret2;
        return 0;
}

static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                            struct btrfs_fs_info *fs_info)
{
        int ret;

        ret = btrfs_write_and_wait_marked_extents(fs_info,
                                           &trans->transaction->dirty_pages,
                                           EXTENT_DIRTY);
        clear_btree_io_tree(&trans->transaction->dirty_pages);

        return ret;
}
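
/*
 * Informational summary of the write/wait protocol above:
 *
 *      btrfs_write_marked_extents()    @mark -> EXTENT_NEED_WAIT, start IO
 *      __btrfs_wait_marked_extents()   clear EXTENT_NEED_WAIT, wait on IO
 *
 * In btrfs_write_and_wait_marked_extents() a write error takes precedence
 * over a wait error.
 */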

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *tree_root = fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        return ret;

                old_root_used = btrfs_root_used(&root->root_item);
        }

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
        struct list_head *io_bgs = &trans->transaction->io_bgs;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        if (ret)
                return ret;

        ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
        if (ret)
                return ret;

        ret = btrfs_run_dev_stats(trans, fs_info);
        if (ret)
                return ret;
        ret = btrfs_run_dev_replace(trans, fs_info);
        if (ret)
                return ret;
        ret = btrfs_run_qgroups(trans, fs_info);
        if (ret)
                return ret;

        ret = btrfs_setup_space_cache(trans, fs_info);
        if (ret)
                return ret;

        /* run_qgroups might have added some more refs */
        ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
        if (ret)
                return ret;
again:
        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                struct btrfs_root *root;
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
                clear_bit(BTRFS_ROOT_DIRTY, &root->state);

                if (root != fs_info->extent_root)
                        list_add_tail(&root->dirty_list,
                                      &trans->transaction->switch_commits);
                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
                ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
                if (ret)
                        return ret;
        }

        while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
                ret = btrfs_write_dirty_block_groups(trans, fs_info);
                if (ret)
                        return ret;
                ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
                if (ret)
                        return ret;
        }

        if (!list_empty(&fs_info->dirty_cowonly_roots))
                goto again;

        list_add_tail(&fs_info->extent_root->dirty_list,
                      &trans->transaction->switch_commits);
        btrfs_after_dev_replace_commit(fs_info);

        return 0;
}
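
/*
 * Informational note: the "again" loop above exists because running delayed
 * refs and writing dirty block groups can re-dirty cowonly roots, so
 * commit_cowonly_roots() iterates until dirty_cowonly_roots stays empty.
 */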

/*
 * dead roots are old snapshots that need to be deleted. This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
        if (list_empty(&root->root_list))
                list_add_tail(&root->root_list, &fs_info->dead_roots);
        spin_unlock(&fs_info->trans_lock);
}

/*
 * commit the fs roots that were modified in this transaction (the ones
 * tagged in fs_roots_radix): free their log trees, write out their ino
 * caches and update their root items in the tree of tree roots
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *gang[8];
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        struct btrfs_root *root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
                        smp_mb__after_atomic();

                        if (root->commit_root != root->node) {
                                list_add_tail(&root->dirty_list,
                                        &trans->transaction->switch_commits);
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                        btrfs_qgroup_free_meta_all(root);
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;

        if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root);

                btrfs_end_transaction(trans);
                btrfs_btree_balance_dirty(info);
                cond_resched();

                if (btrfs_fs_closing(info) || ret != -EAGAIN)
                        break;

                if (btrfs_defrag_cancelled(info)) {
                        btrfs_debug(info, "defrag_root cancelled");
                        ret = -EAGAIN;
                        break;
                }
        }
        clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
        return ret;
}

/*
 * Do all the special snapshot-related qgroup dirty hacks.
 *
 * This does all the needed qgroup inheritance and dirty hacks, like
 * switching commit roots inside one transaction and writing all the btrees
 * to disk, so that the qgroup numbers come out right.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *src,
                                   struct btrfs_root *parent,
                                   struct btrfs_qgroup_inherit *inherit,
                                   u64 dst_objectid)
{
        struct btrfs_fs_info *fs_info = src->fs_info;
        int ret;

        /*
         * Save some performance in the case that qgroups are not
         * enabled. If this check races with the ioctl, rescan will
         * kick in anyway.
         */
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        /*
         * We are going to commit the transaction; see the
         * btrfs_commit_transaction() comment for the reason we lock
         * tree_log_mutex.
         */
        mutex_lock(&fs_info->tree_log_mutex);

        ret = commit_fs_roots(trans, fs_info);
        if (ret)
                goto out;
        ret = btrfs_qgroup_account_extents(trans, fs_info);
        if (ret < 0)
                goto out;

        /* Now the qgroups are all updated, we can inherit them to the new qgroups */
        ret = btrfs_qgroup_inherit(trans, fs_info,
                                   src->root_key.objectid, dst_objectid,
                                   inherit);
        if (ret < 0)
                goto out;

        /*
         * Now we do a simplified commit transaction, which will:
         * 1) commit all the subvolume and extent trees,
         *    to ensure they have valid commit_roots for the later
         *    insert_dir_item() accounting;
         * 2) write all btree blocks onto disk,
         *    to make sure later btree modifications get cowed; otherwise a
         *    stale commit_root could produce wrong qgroup numbers.
         * In this simplified commit, we don't really care about other trees
         * like the chunk and root trees, as they won't affect qgroup.
         * And we don't write the super to avoid a half-committed state.
         */
        ret = commit_cowonly_roots(trans, fs_info);
        if (ret)
                goto out;
        switch_commit_roots(trans->transaction, fs_info);
        ret = btrfs_write_and_wait_transaction(trans, fs_info);
        if (ret)
                btrfs_handle_fs_error(fs_info, ret,
                        "Error while writing out transaction for qgroup");

out:
        mutex_unlock(&fs_info->tree_log_mutex);

        /*
         * Force the parent root to be updated: we recorded it before, so its
         * last_trans == cur_transid and it would otherwise not be committed
         * onto disk again after the later insert_dir_item().
         */
        if (!ret)
                record_root_in_trans(trans, parent, 1);
        return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit. This does the actual creation.
 *
 * Note:
 * If an error that may affect the commit of the current transaction
 * happens, we should return the error number. If an error only affects the
 * creation of the pending snapshots, just return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct btrfs_root *parent_root;
        struct btrfs_block_rsv *rsv;
        struct inode *parent_inode;
        struct btrfs_path *path;
        struct btrfs_dir_item *dir_item;
        struct dentry *dentry;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        struct timespec cur_time;
        int ret = 0;
        u64 to_reserve = 0;
        u64 index = 0;
        u64 objectid;
        u64 root_flags;
        uuid_le new_uuid;

        ASSERT(pending->path);
        path = pending->path;

        ASSERT(pending->root_item);
        new_root_item = pending->root_item;

        pending->error = btrfs_find_free_objectid(tree_root, &objectid);
        if (pending->error)
                goto no_free_objectid;

        /*
         * Make qgroup skip the new snapshot's qgroupid; it is accounted
         * for by the later btrfs_qgroup_inherit().
         */
        btrfs_set_skip_qgroup(trans, objectid);

        btrfs_reloc_pre_snapshot(pending, &to_reserve);

        if (to_reserve > 0) {
                pending->error = btrfs_block_rsv_add(root,
                                                     &pending->block_rsv,
                                                     to_reserve,
                                                     BTRFS_RESERVE_NO_FLUSH);
                if (pending->error)
                        goto clear_skip_qgroup;
        }

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_ROOT_ITEM_KEY;

        rsv = trans->block_rsv;
        trans->block_rsv = &pending->block_rsv;
        trans->bytes_reserved = trans->block_rsv->reserved;
        trace_btrfs_space_reservation(fs_info, "transaction",
                                      trans->transid,
                                      trans->bytes_reserved, 1);
        dentry = pending->dentry;
        parent_inode = pending->dir;
        parent_root = BTRFS_I(parent_inode)->root;
        record_root_in_trans(trans, parent_root, 0);

        cur_time = current_time(parent_inode);

        /*
         * insert the directory item
         */
        ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
        BUG_ON(ret); /* -ENOMEM */

        /* check if there is a file/dir which has the same name. */
        dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
                                         btrfs_ino(BTRFS_I(parent_inode)),
                                         dentry->d_name.name,
                                         dentry->d_name.len, 0);
        if (dir_item != NULL && !IS_ERR(dir_item)) {
                pending->error = -EEXIST;
                goto dir_item_existed;
        } else if (IS_ERR(dir_item)) {
                ret = PTR_ERR(dir_item);
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }
        btrfs_release_path(path);

        /*
         * pull in the delayed directory update
         * and the delayed inode item
         * otherwise we corrupt the FS during
         * snapshot
         */
        ret = btrfs_run_delayed_items(trans, fs_info);
        if (ret) {      /* Transaction aborted */
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

        record_root_in_trans(trans, root, 0);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
        btrfs_check_and_init_root_item(new_root_item);

        root_flags = btrfs_root_flags(new_root_item);
        if (pending->readonly)
                root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
        else
                root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
        btrfs_set_root_flags(new_root_item, root_flags);

        btrfs_set_root_generation_v2(new_root_item,
                                     trans->transid);
        uuid_le_gen(&new_uuid);
        memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
        memcpy(new_root_item->parent_uuid, root->root_item.uuid,
               BTRFS_UUID_SIZE);
        if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
                memset(new_root_item->received_uuid, 0,
                       sizeof(new_root_item->received_uuid));
                memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
                memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
                btrfs_set_root_stransid(new_root_item, 0);
                btrfs_set_root_rtransid(new_root_item, 0);
        }
        btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
        btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
        btrfs_set_root_otransid(new_root_item, trans->transid);

        old = btrfs_lock_root_node(root);
        ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
        if (ret) {
                btrfs_tree_unlock(old);
                free_extent_buffer(old);
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

        btrfs_set_lock_blocking(old);

        ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
        /* clean up in any case */
        btrfs_tree_unlock(old);
        free_extent_buffer(old);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }
        /* see comments in should_cow_block() */
        set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
        smp_wmb();

        btrfs_set_root_node(new_root_item, tmp);
        /* record when the snapshot was created in key.offset */
        key.offset = trans->transid;
        ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

        /*
         * insert root back/forward references
         */
        ret = btrfs_add_root_ref(trans, fs_info, objectid,
                                 parent_root->root_key.objectid,
                                 btrfs_ino(BTRFS_I(parent_inode)), index,
                                 dentry->d_name.name, dentry->d_name.len);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

        key.offset = (u64)-1;
        pending->snap = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(pending->snap)) {
                ret = PTR_ERR(pending->snap);
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

        ret = btrfs_reloc_post_snapshot(trans, pending);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

        ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

        /*
         * Do special qgroup accounting for the snapshot: we already did
         * some qgroup hacks to make snapshot creation fast, and we have to
         * cooperate with those hacks here, or the snapshot would be greatly
         * slowed down by a subtree qgroup rescan.
         */
        ret = qgroup_account_snapshot(trans, root, parent_root,
                                      pending->inherit, objectid);
        if (ret < 0)
                goto fail;

        ret = btrfs_insert_dir_item(trans, parent_root,
                                    dentry->d_name.name, dentry->d_name.len,
                                    BTRFS_I(parent_inode), &key,
                                    BTRFS_FT_DIR, index);
        /* We checked the name at the beginning, so -EEXIST is impossible. */
        BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

        btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
                                         dentry->d_name.len * 2);
        parent_inode->i_mtime = parent_inode->i_ctime =
                current_time(parent_inode);
        ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }
        ret = btrfs_uuid_tree_add(trans, fs_info, new_uuid.b,
                                  BTRFS_UUID_KEY_SUBVOL, objectid);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }
        if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
                ret = btrfs_uuid_tree_add(trans, fs_info,
                                          new_root_item->received_uuid,
                                          BTRFS_UUID_KEY_RECEIVED_SUBVOL,
                                          objectid);
                if (ret && ret != -EEXIST) {
                        btrfs_abort_transaction(trans, ret);
                        goto fail;
                }
        }

        ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

fail:
        pending->error = ret;
dir_item_existed:
        trans->block_rsv = rsv;
        trans->bytes_reserved = 0;
clear_skip_qgroup:
        btrfs_clear_skip_qgroup(trans);
no_free_objectid:
        kfree(new_root_item);
        pending->root_item = NULL;
        btrfs_free_path(path);
        pending->path = NULL;

        return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending, *next;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret = 0;

        list_for_each_entry_safe(pending, next, head, list) {
                list_del(&pending->list);
                ret = create_pending_snapshot(trans, fs_info, pending);
                if (ret)
                        break;
        }
        return ret;
}

static void update_super_roots(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root_item *root_item;
        struct btrfs_super_block *super;

        super = fs_info->super_copy;

        root_item = &fs_info->chunk_root->root_item;
        super->chunk_root = root_item->bytenr;
        super->chunk_root_generation = root_item->generation;
        super->chunk_root_level = root_item->level;

        root_item = &fs_info->tree_root->root_item;
        super->root = root_item->bytenr;
        super->generation = root_item->generation;
        super->root_level = root_item->level;
        if (btrfs_test_opt(fs_info, SPACE_CACHE))
                super->cache_generation = root_item->generation;
        if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
                super->uuid_tree_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
        struct btrfs_transaction *trans;
        int ret = 0;

        spin_lock(&info->trans_lock);
        trans = info->running_transaction;
        if (trans)
                ret = (trans->state >= TRANS_STATE_COMMIT_START);
        spin_unlock(&info->trans_lock);
        return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
        struct btrfs_transaction *trans;
        int ret = 0;

        spin_lock(&info->trans_lock);
        trans = info->running_transaction;
        if (trans)
                ret = is_transaction_blocked(trans);
        spin_unlock(&info->trans_lock);
        return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
                                            struct btrfs_transaction *trans)
{
        wait_event(fs_info->transaction_blocked_wait,
                   trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(
                                        struct btrfs_fs_info *fs_info,
                                        struct btrfs_transaction *trans)
{
        wait_event(fs_info->transaction_wait,
                   trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
        struct btrfs_trans_handle *newtrans;
        struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
        struct btrfs_async_commit *ac =
                container_of(work, struct btrfs_async_commit, work);

        /*
         * We've got freeze protection passed with the transaction.
         * Tell lockdep about it.
         */
        if (ac->newtrans->type & __TRANS_FREEZABLE)
                __sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);

        current->journal_info = ac->newtrans;

        btrfs_commit_transaction(ac->newtrans);
        kfree(ac);
}
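
/*
 * Informational note: btrfs_commit_transaction_async() below hands its
 * freeze protection (sb_start_intwrite()) over to the worker thread: the
 * __sb_writers_release() there pairs with the __sb_writers_acquired() in
 * do_async_commit() above, so lockdep tracks the rwsem across tasks.
 */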
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_release(fs_info->sb, SB_FREEZE_FS);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
	else
		wait_current_trans_commit_start(fs_info, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}

static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	DEFINE_WAIT(wait);

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, err);

	spin_lock(&fs_info->trans_lock);

	/*
	 * If the transaction has been removed from the list, it means this
	 * transaction was committed successfully, so it is impossible to
	 * call the cleanup function.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	list_del_init(&cur_trans->list);
	if (cur_trans == fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&fs_info->trans_lock);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, fs_info);

	spin_lock(&fs_info->trans_lock);
	if (cur_trans == fs_info->running_transaction)
		fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	btrfs_scrub_cancel(fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	/*
	 * We use writeback_inodes_sb here because if we used
	 * btrfs_start_delalloc_roots we would deadlock with fs freeze.
	 * We are currently holding the fs freeze lock; if we do an async
	 * flush we'll do btrfs_join_transaction() and deadlock because we
	 * need to wait for the fs freeze lock. Using the direct flushing
	 * we benefit from already being in a transaction and our
	 * join_transaction doesn't have to re-take the fs freeze lock.
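	 * Note that this only starts the writeback; the wait for the
	 * resulting ordered extents happens in btrfs_wait_delalloc_flush()
	 * below.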
	 */
	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
		writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
	return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}

static inline void
btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans)
{
	wait_event(cur_trans->pending_wait,
		   atomic_read(&cur_trans->pending_ordered) == 0);
}

int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	int ret;

	/* Stop the commit early if ->aborted is set */
	if (unlikely(READ_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans);
		return ret;
	}

	/*
	 * Make a pass through all the delayed refs we have so far;
	 * any running procs may add more while we are here.
	 */
	ret = btrfs_run_delayed_refs(trans, fs_info, 0);
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}

	btrfs_trans_release_metadata(trans, fs_info);
	trans->block_rsv = NULL;

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;
	smp_wmb();

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, fs_info);

	ret = btrfs_run_delayed_refs(trans, fs_info, 0);
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}

	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
		int run_it = 0;

		/*
		 * This mutex is also taken before trying to set block groups
		 * readonly. We need to make sure that nobody has set a block
		 * group readonly after extents from that block group have
		 * been allocated for cache files. btrfs_set_block_group_ro
		 * will wait for the transaction to commit if it finds
		 * BTRFS_TRANS_DIRTY_BG_RUN set.
		 *
		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
		 * only one process starts all the block group IO. It wouldn't
		 * hurt to have more than one go through, but there's no
		 * real advantage to it either.
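		 * (The test_and_set_bit() below is what guarantees that only
		 * the first committer to get here sees run_it == 1.)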
		 */
		mutex_lock(&fs_info->ro_block_group_mutex);
		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
				      &cur_trans->flags))
			run_it = 1;
		mutex_unlock(&fs_info->ro_block_group_mutex);

		if (run_it)
			ret = btrfs_start_dirty_block_groups(trans, fs_info);
	}
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}

	spin_lock(&fs_info->trans_lock);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		spin_unlock(&fs_info->trans_lock);
		refcount_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans);

		wait_for_commit(cur_trans);

		if (unlikely(cur_trans->aborted))
			ret = cur_trans->aborted;

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state != TRANS_STATE_COMPLETED) {
			refcount_inc(&prev_trans->use_count);
			spin_unlock(&fs_info->trans_lock);

			wait_for_commit(prev_trans);
			ret = prev_trans->aborted;

			btrfs_put_transaction(prev_trans);
			if (ret)
				goto cleanup_transaction;
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
	} else {
		spin_unlock(&fs_info->trans_lock);
	}

	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(fs_info);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_run_delayed_items(trans, fs_info);
	if (ret)
		goto cleanup_transaction;

	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* More pending items may have been added after the previous flush. */
	ret = btrfs_run_delayed_items(trans, fs_info);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(fs_info);

	btrfs_wait_pending_ordered(cur_trans);

	btrfs_scrub_pause(fs_info);
	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * COMMIT_DOING so make sure to wait for num_writers to drop to 1 again.
	 */
	spin_lock(&fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(READ_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, fs_info);
	if (ret) {
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with. Now deal with
	 * them.
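	 * (These are the dir index insertion and the parent inode update
	 * queued by create_pending_snapshot() above.)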
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, fs_info);
	if (ret) {
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(fs_info);

	WARN_ON(cur_trans != trans->transaction);

	/*
	 * commit_fs_roots() and commit_cowonly_roots() below are responsible
	 * for getting the various roots consistent with each other. Every
	 * pointer in the tree of tree roots has to point to the most up to
	 * date root for every subvolume and other tree. So, we have to keep
	 * the tree logging code from jumping in and changing any of the
	 * trees.
	 *
	 * At this point in the commit, there can't be any tree-log writers,
	 * but a little lower down we drop the trans mutex and let new people
	 * in. By holding the tree_log_mutex from now until after the super
	 * is written, we avoid races with the tree-log code.
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, fs_info);
	if (ret) {
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since the transaction is done, we can apply the pending changes
	 * before the next transaction.
	 */
	btrfs_apply_pending_changes(fs_info);

	/*
	 * commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, fs_info);

	/*
	 * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
	 * new delayed refs. Must handle them or qgroup can be wrong.
	 */
	ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since the fs roots are all committed, we can get quite accurate
	 * new_roots. So let's do the quota accounting.
	 */
	ret = btrfs_qgroup_account_extents(trans, fs_info);
	if (ret < 0) {
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = commit_cowonly_roots(trans, fs_info);
	if (ret) {
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
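	 * (Those errors may not propagate back through the calls above, so
	 * ->aborted can be the only place the failure is visible.)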
	 */
	if (unlikely(READ_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	btrfs_prepare_extent_commit(fs_info);

	cur_trans = fs_info->running_transaction;

	btrfs_set_root_node(&fs_info->tree_root->root_item,
			    fs_info->tree_root->node);
	list_add_tail(&fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&fs_info->chunk_root->root_item,
			    fs_info->chunk_root->node);
	list_add_tail(&fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	switch_commit_roots(cur_trans, fs_info);

	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));
	update_super_roots(fs_info);

	btrfs_set_super_log_root(fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_copy));

	btrfs_update_commit_device_size(fs_info);
	btrfs_update_commit_device_bytes_used(fs_info, cur_trans);

	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);

	btrfs_trans_release_chunk_metadata(trans);

	spin_lock(&fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);
	mutex_unlock(&fs_info->reloc_mutex);

	wake_up(&fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, fs_info);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Error while writing out transaction");
		mutex_unlock(&fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_all_supers(fs_info, 0);
	if (ret) {
		mutex_unlock(&fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, fs_info);

	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
		btrfs_clear_space_info_full(fs_info);

	fs_info->last_trans_committed = cur_trans->transid;
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	spin_lock(&fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);

	trace_btrfs_transaction_commit(trans->root);

	btrfs_scrub_continue(fs_info);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	/*
	 * If the fs has been frozen, we cannot handle delayed iputs here;
	 * doing so would deadlock on SB_FREEZE_FS.
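	 * (Dropping the last reference to an inode can start a new
	 * transaction, which would block on the frozen fs.)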
	 */
	if (current != fs_info->transaction_kthread &&
	    current != fs_info->cleaner_kthread &&
	    !test_bit(BTRFS_FS_FROZEN, &fs_info->flags))
		btrfs_run_delayed_iputs(fs_info);

	return ret;

scrub_continue:
	btrfs_scrub_continue(fs_info);
cleanup_transaction:
	btrfs_trans_release_metadata(trans, fs_info);
	btrfs_trans_release_chunk_metadata(trans);
	trans->block_rsv = NULL;
	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, trans->root, ret);

	return ret;
}

/*
 * Return 0 if there are no more dead roots at the time of the call (or if
 * dropping the snapshot failed), and 1 if there are more to be processed
 * and we should be called again.
 *
 * A return value of 1 means there are certainly more snapshots to delete,
 * but if a new dead root shows up while we are processing, we may still
 * return 0. We don't mind, because btrfs_commit_super will poke the
 * cleaner thread and it will process the new root a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
				struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_debug(fs_info, "cleaner removing %llu", root->objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) <
	    BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, NULL, 1, 0);

	return (ret < 0) ? 0 : 1;
}

void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
{
	unsigned long prev;
	unsigned long bit;

	prev = xchg(&fs_info->pending_changes, 0);
	if (!prev)
		return;

	bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_COMMIT;
	if (prev & bit)
		btrfs_debug(fs_info, "pending commit done");
	prev &= ~bit;

	if (prev)
		btrfs_warn(fs_info,
			"unknown pending changes left 0x%lx, ignoring", prev);
}
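/*
 * Illustrative caller sketch (e.g. the cleaner thread): keep calling
 * btrfs_clean_one_deleted_snapshot() until it reports no more work:
 *
 *	while (btrfs_clean_one_deleted_snapshot(root) == 1)
 *		cond_resched();
 */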